diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 00000000..7adfc60d --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,232 @@ +{ + "ImportPath": "github.com/runcom/skopeo", + "GoVersion": "go1.5.3", + "Deps": [ + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.8.7-55-gf7f79f7", + "Rev": "f7f79f729e0fbe2fcc061db48a9ba0263f588252" + }, + { + "ImportPath": "github.com/codegangsta/cli", + "Comment": "1.2.0-187-gc31a797", + "Rev": "c31a7975863e7810c92e2e288a9ab074f9a88f29" + }, + { + "ImportPath": "github.com/docker/distribution", + "Comment": "v2.2.0-190-g47a064d", + "Rev": "47a064d4195a9b56133891bbb13620c3ac83a827" + }, + { + "ImportPath": "github.com/docker/docker/api", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/daemon/graphdriver", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/distribution", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/dockerversion", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/image", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/layer", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/opts", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/archive", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/chrootarchive", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/fileutils", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/homedir", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/httputils", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/idtools", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/ioutils", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/jsonlog", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/jsonmessage", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/mflag", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/parsers/kernel", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/pools", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": 
"bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/progress", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/promise", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/random", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/reexec", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/stringid", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/system", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/tarsum", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/term", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/useragent", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/pkg/version", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/reference", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/docker/registry", + "Comment": "v1.4.1-9354-gbdcc3eb", + "Rev": "bdcc3ebe4168a72a0f12d171e674905f2fb40d01" + }, + { + "ImportPath": "github.com/docker/engine-api/types", + "Comment": "v0.2.2-13-ge98ada9", + "Rev": "e98ada9b4fa351554aa7ee7cf857a2d6d26b1453" + }, + { + "ImportPath": "github.com/docker/go-connections/nat", + "Comment": "v0.1.2-2-g082e382", + "Rev": "082e382836465660fe3fd21de06624a205f986eb" + }, + { + "ImportPath": "github.com/docker/go-connections/tlsconfig", + "Comment": "v0.1.2-2-g082e382", + "Rev": "082e382836465660fe3fd21de06624a205f986eb" + }, + { + "ImportPath": "github.com/docker/go-units", + "Comment": "v0.1.0-21-g0bbddae", + "Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "1c83b3eabd45b6d76072b66b746c20815fb2872d" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "26a6070f849969ba72b72256e9f14cf519751690" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/user", + "Comment": "v0.0.4-226-gbc46574", + "Rev": "bc465742ac61aad50dec8e9769a75f01d8d8e502" + }, + { + "ImportPath": "github.com/vbatts/tar-split/archive/tar", + "Comment": "v0.9.11-1-gd50e5c9", + "Rev": "d50e5c9283da469398d84078519de569f617be6f" + }, + { + "ImportPath": "github.com/vbatts/tar-split/tar/asm", + "Comment": "v0.9.11-1-gd50e5c9", + "Rev": "d50e5c9283da469398d84078519de569f617be6f" + }, + { + "ImportPath": "github.com/vbatts/tar-split/tar/storage", + "Comment": "v0.9.11-1-gd50e5c9", + "Rev": "d50e5c9283da469398d84078519de569f617be6f" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "78e1654ef40b8b993e3355307740b1cac8f1db20" + } + ] +} diff --git 
a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 00000000..f037d684 --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 00000000..66be63a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 00000000..ec641142 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 + - tip +install: + - go get -t ./... diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 00000000..ecc84327 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,55 @@ +# 0.9.0 (Unreleased) + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 00000000..f090cb42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md new file mode 100644 index 00000000..55d3a8d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,365 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!"
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely API-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different from the package-level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus, using any of the `printf`-family functions should be +seen as a hint that you should add a field; however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example, to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/).
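+A custom hook just implements the `Hook` interface from this package +(`Levels() []Level` and `Fire(*Entry) error`). A minimal sketch, using a +hypothetical hook that counts Error-and-above entries: + +```go +import log "github.com/Sirupsen/logrus" + +// errorCounter is a hypothetical hook that counts Error-and-above entries. +type errorCounter struct{ count int } + +// Levels reports which levels this hook fires on. +func (h *errorCounter) Levels() []log.Level { + return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel} +} + +// Fire is called once per matching entry. +func (h *errorCounter) Fire(e *log.Entry) error { + h.count++ + return nil +} +``` + +Register it with `log.AddHook(&errorCounter{})`.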
Add the built-in ones, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: the syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to a remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+ +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`; it will then only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `WithFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even when there is a TTY, set the + `DisableColors` field to `true`. +* `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. + + ```go + logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) + ``` + +Third party logging formatters: + +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your own formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +// Register it with: log.SetFormatter(new(MyJSONFormatter)) +type MyJSONFormatter struct { +} + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` for information about those fields or read the + // source of the official loggers.
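+ // entry.Data holds only the user-supplied fields added via WithField(s).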
+ serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can configure each logger's level, hooks and formatter in a config file, and loggers are generated with different configurations for different environments.| + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 00000000..dddd5f87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 00000000..9ae900bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,264 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// ErrorKey defines the key used when adding errors with WithError. +var ErrorKey = "error" + +// An Entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter.
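+// The returned buffer holds the entry as rendered by the Logger's Formatter.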
+func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) 
+} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of +// fmt.Sprintln, where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...)
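+ // Chop the trailing newline that Sprintln appends.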
+ return msg[:len(msg)-1] +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 00000000..a1623ec0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() + if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 00000000..3187f6d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 00000000..9a0120ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. 
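+// Hooks added here fire for every entry logged through the package-level functions.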
+func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. 
+func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 00000000..104d689f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]`. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there, doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// would just silently drop the user-provided level. Instead, with this code +// it'll be logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 00000000..aad646ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,61 @@ +package logstash + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" +) + +// LogstashFormatter generates JSON in Logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty, used for the logstash type field. + + // TimestampFormat sets the format used for timestamps.
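+ // Empty means logrus.DefaultTimestampFormat (time.RFC3339).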
+ TimestampFormat string +} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + fields := make(logrus.Fields) + for k, v := range entry.Data { + fields[k] = v + } + + fields["@version"] = 1 + + if f.TimestampFormat == "" { + f.TimestampFormat = logrus.DefaultTimestampFormat + } + + fields["@timestamp"] = entry.Time.Format(f.TimestampFormat) + + // set message field + v, ok := entry.Data["message"] + if ok { + fields["fields.message"] = v + } + fields["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + fields["fields.level"] = v + } + fields["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + fields["fields.type"] = v + } + fields["type"] = f.Type + } + + serialized, err := json.Marshal(fields) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 00000000..3f151cdc --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers; you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 00000000..066704b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to a local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following.
+ +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 00000000..c59f331d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,61 @@ +// +build !windows,!nacl,!plan9 + +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000..2ad6dc5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
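+ // Empty means DefaultTimestampFormat (time.RFC3339).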
+ TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 00000000..2fdb2317 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,212 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it as the default, which is `os.Stderr`. You can also set + // this to something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before being logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter`, of which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but not when writing to a file. You can easily implement + // your own that implements the `Formatter` interface, see the `README` or + // included formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in a debug or verbose environment. + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: + +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } + +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry; note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry { + return NewEntry(logger).WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) 
+ } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 00000000..0c09fbc2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
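+// For example, a library can accept a StdLogger parameter; callers may then +// pass either log.New(os.Stderr, "", 0) from the stdlib or a *logrus.Logger.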
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 00000000..71f8d67a --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 00000000..a2c0b40d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 00000000..b343b3a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stderr + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 00000000..3e70bf7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris + +package logrus + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 00000000..0146845d --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build windows
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..06ef2023
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+	return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
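+	// A sketch of opting out from client code (illustrative; SetFormatter is
+	// logrus's package-level helper):
+	//
+	//	logrus.SetFormatter(&logrus.TextFormatter{DisableSorting: true})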
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+
+	b := &bytes.Buffer{}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+	}
+}
+
+// needsQuoting reports whether text contains any character outside
+// [a-zA-Z0-9.-] and therefore has to be quoted when printed.
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+	b.WriteString(key)
+	b.WriteByte('=')
+
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+
+	b.WriteByte(' ')
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 00000000..1e30b1c7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	go logger.writerScanner(reader)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		logger.Print(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml b/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
new file mode 100644
index 00000000..87ba52f9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+go:
+- 1.0.3
+- 1.1.2
+- 1.2.2
+- 1.3.3
+- 1.4.2
+- 1.5.1
+- tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+script:
+- go vet ./...
+- go test -v ./...
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE
new file mode 100644
index 00000000..5515ccfb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE
@@ -0,0 +1,21 @@
+Copyright (C) 2013 Jeremy Saenz
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/Godeps/_workspace/src/github.com/codegangsta/cli/README.md
new file mode 100644
index 00000000..26a18386
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/README.md
@@ -0,0 +1,341 @@
+[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli)
+[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli)
+[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli)
+
+# cli.go
+`cli.go` is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
+
+## Overview
+Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
+
+**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive!
+
+## Installation
+Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
+
+To install `cli.go`, simply run:
+```
+$ go get github.com/codegangsta/cli
+```
+
+Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used:
+```
+export PATH=$PATH:$GOPATH/bin
+```
+
+## Getting Started
+One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`.
+ +``` go +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: + +``` go +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) { + println("boom! I say!") + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below. + +## Example + +Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! + +Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: + +``` go +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) { + println("Hello friend!") + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! +``` + +`cli.go` also generates neat help text: + +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] + +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments +You can lookup arguments by calling the `Args` function on `cli.Context`. + +``` go +... +app.Action = func(c *cli.Context) { + println("Hello", c.Args()[0]) +} +... +``` + +### Flags +Setting and querying flags is simple. +``` go +... +app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, +} +app.Action = func(c *cli.Context) { + name := "someone" + if len(c.Args()) > 0 { + name = c.Args()[0] + } + if c.String("lang") == "spanish" { + println("Hola", name) + } else { + println("Hello", name) + } +} +... +``` + +You can also set a destination variable for a flag, to which the content will be scanned. +``` go +... +var language string +app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, +} +app.Action = func(c *cli.Context) { + name := "someone" + if len(c.Args()) > 0 { + name = c.Args()[0] + } + if language == "spanish" { + println("Hola", name) + } else { + println("Hello", name) + } +} +... +``` + +See full list of flags at http://godoc.org/github.com/codegangsta/cli + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. + +``` go +app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + }, +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. + +#### Values from the Environment + +You can also have the default value set from the environment via `EnvVar`. e.g. 
+
+``` go
+app.Flags = []cli.Flag {
+  cli.StringFlag{
+    Name: "lang, l",
+    Value: "english",
+    Usage: "language for the greeting",
+    EnvVar: "APP_LANG",
+  },
+}
+```
+
+The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
+
+``` go
+app.Flags = []cli.Flag {
+  cli.StringFlag{
+    Name: "lang, l",
+    Value: "english",
+    Usage: "language for the greeting",
+    EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+  },
+}
+```
+
+### Subcommands
+
+Subcommands can be defined for a more git-like command line app.
+```go
+...
+app.Commands = []cli.Command{
+  {
+    Name:    "add",
+    Aliases: []string{"a"},
+    Usage:   "add a task to the list",
+    Action: func(c *cli.Context) {
+      println("added task: ", c.Args().First())
+    },
+  },
+  {
+    Name:    "complete",
+    Aliases: []string{"c"},
+    Usage:   "complete a task on the list",
+    Action: func(c *cli.Context) {
+      println("completed task: ", c.Args().First())
+    },
+  },
+  {
+    Name:    "template",
+    Aliases: []string{"r"},
+    Usage:   "options for task templates",
+    Subcommands: []cli.Command{
+      {
+        Name:  "add",
+        Usage: "add a new template",
+        Action: func(c *cli.Context) {
+          println("new task template: ", c.Args().First())
+        },
+      },
+      {
+        Name:  "remove",
+        Usage: "remove an existing template",
+        Action: func(c *cli.Context) {
+          println("removed task template: ", c.Args().First())
+        },
+      },
+    },
+  },
+}
+...
+```
+
+### Bash Completion
+
+You can enable completion commands by setting the `EnableBashCompletion`
+flag on the `App` object. By default, this setting will only auto-complete to
+show an app's subcommands, but you can write your own completion methods for
+the App or its subcommands.
+```go
+...
+var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
+app := cli.NewApp()
+app.EnableBashCompletion = true
+app.Commands = []cli.Command{
+  {
+    Name:    "complete",
+    Aliases: []string{"c"},
+    Usage:   "complete a task on the list",
+    Action: func(c *cli.Context) {
+      println("completed task: ", c.Args().First())
+    },
+    BashComplete: func(c *cli.Context) {
+      // This will complete if no args are passed
+      if len(c.Args()) > 0 {
+        return
+      }
+      for _, t := range tasks {
+        fmt.Println(t)
+      }
+    },
+  },
+}
+...
+```
+
+#### To Enable
+
+Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
+setting the `PROG` variable to the name of your program:
+
+`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
+
+#### To Distribute
+
+Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
+it to the name of the program you wish to add autocomplete support for (or
+automatically install it there if you are distributing a package). Don't forget
+to source the file to make it active in the current shell.
+
+```
+   sudo cp autocomplete/bash_autocomplete /etc/bash_completion.d/myprogram
+   source /etc/bash_completion.d/myprogram
+```
+
+Alternatively, you can just document that users should source the generic
+`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
+to the name of their program (as above).
+
+## Contribution Guidelines
+Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
+
+If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together.
+
+If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out.
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/Godeps/_workspace/src/github.com/codegangsta/cli/app.go
new file mode 100644
index 00000000..2f992d0d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/app.go
@@ -0,0 +1,334 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"time"
+)
+
+// App is the main structure of a cli application. It is recommended that
+// an app be created with the cli.NewApp() function
+type App struct {
+	// The name of the program. Defaults to path.Base(os.Args[0])
+	Name string
+	// Full name of command for help, defaults to Name
+	HelpName string
+	// Description of the program.
+	Usage string
+	// Description of the program argument format.
+	ArgsUsage string
+	// Version of the program
+	Version string
+	// List of commands to execute
+	Commands []Command
+	// List of flags to parse
+	Flags []Flag
+	// Boolean to enable bash completion commands
+	EnableBashCompletion bool
+	// Boolean to hide built-in help command
+	HideHelp bool
+	// Boolean to hide built-in version flag
+	HideVersion bool
+	// An action to execute when the bash-completion flag is set
+	BashComplete func(context *Context)
+	// An action to execute before any subcommands are run, but after the context is ready
+	// If a non-nil error is returned, no subcommands are run
+	Before func(context *Context) error
+	// An action to execute after any subcommands are run, once the subcommand has finished
+	// It is run even if Action() panics
+	After func(context *Context) error
+	// The action to execute when no subcommands are specified
+	Action func(context *Context)
+	// Execute this function if the proper command cannot be found
+	CommandNotFound func(context *Context, command string)
+	// Compilation date
+	Compiled time.Time
+	// List of all authors who contributed
+	Authors []Author
+	// Copyright of the binary if any
+	Copyright string
+	// Name of Author (Note: Use App.Authors, this is deprecated)
+	Author string
+	// Email of Author (Note: Use App.Authors, this is deprecated)
+	Email string
+	// Writer is the writer to which output is written
+	Writer io.Writer
+}
+
+// Tries to find out when this binary was compiled.
+// Returns the current time if it fails to find it.
+func compileTime() time.Time {
+	info, err := os.Stat(os.Args[0])
+	if err != nil {
+		return time.Now()
+	}
+	return info.ModTime()
+}
+
+// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
+func NewApp() *App {
+	return &App{
+		Name:         path.Base(os.Args[0]),
+		HelpName:     path.Base(os.Args[0]),
+		Usage:        "A new cli application",
+		Version:      "0.0.0",
+		BashComplete: DefaultAppComplete,
+		Action:       helpCommand.Action,
+		Compiled:     compileTime(),
+		Writer:       os.Stdout,
+	}
+}
+
+// Entry point to the cli app.
Parses the arguments slice and routes to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + // append help to commands + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + //append version/help flags + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err = set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + context := NewContext(a, set, nil) + ShowAppHelp(context) + return nerr + } + context := NewContext(a, set, nil) + + if checkCompletions(context) { + return nil + } + + if err != nil { + fmt.Fprintln(a.Writer, "Incorrect Usage.") + fmt.Fprintln(a.Writer) + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + err := a.Before(context) + if err != nil { + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + a.Action(context) + return nil +} + +// Another entry point to the cli app, takes care of passing arguments and error handling +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + // append flags + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err = set.Parse(ctx.Args().Tail()) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + ShowSubcommandHelp(context) + } else { + ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + fmt.Fprintln(a.Writer, "Incorrect Usage.") + fmt.Fprintln(a.Writer) + ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } 
+ } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + err := a.Before(context) + if err != nil { + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + a.Action(context) + + return nil +} + +// Returns the named command on App. Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = "<" + a.Email + "> " + } + + return fmt.Sprintf("%v %v", a.Name, e) +} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete new file mode 100644 index 00000000..21a232f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete @@ -0,0 +1,14 @@ +#! /bin/bash + +: ${PROG:=$(basename ${BASH_SOURCE})} + +_cli_bash_autocomplete() { + local cur opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + } + + complete -F _cli_bash_autocomplete $PROG diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete new file mode 100644 index 00000000..5430a18f --- /dev/null +++ b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete @@ -0,0 +1,5 @@ +autoload -U compinit && compinit +autoload -U bashcompinit && bashcompinit + +script_dir=$(dirname $0) +source ${script_dir}/bash_autocomplete diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go new file mode 100644 index 00000000..31dc9124 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go @@ -0,0 +1,40 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. 
cli is designed to be easy to understand and write; the simplest
+// cli application can be written as follows:
+//   func main() {
+//     cli.NewApp().Run(os.Args)
+//   }
+//
+// Of course this application does not do much, so let's make this an actual application:
+//   func main() {
+//     app := cli.NewApp()
+//     app.Name = "greet"
+//     app.Usage = "say a greeting"
+//     app.Action = func(c *cli.Context) {
+//       println("Greetings")
+//     }
+//
+//     app.Run(os.Args)
+//   }
+package cli
+
+import (
+	"strings"
+)
+
+type MultiError struct {
+	Errors []error
+}
+
+func NewMultiError(err ...error) MultiError {
+	return MultiError{Errors: err}
+}
+
+func (m MultiError) Error() string {
+	errs := make([]string, len(m.Errors))
+	for i, err := range m.Errors {
+		errs[i] = err.Error()
+	}
+
+	return strings.Join(errs, "\n")
+}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/Godeps/_workspace/src/github.com/codegangsta/cli/command.go
new file mode 100644
index 00000000..824e77ba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/command.go
@@ -0,0 +1,216 @@
+package cli
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+// Command is a subcommand for a cli.App.
+type Command struct {
+	// The name of the command
+	Name string
+	// Short name of the command. Typically one character (deprecated, use `Aliases`)
+	ShortName string
+	// A list of aliases for the command
+	Aliases []string
+	// A short description of the usage of this command
+	Usage string
+	// A longer explanation of how the command works
+	Description string
+	// A short description of the arguments of this command
+	ArgsUsage string
+	// The function to call when checking for bash command completions
+	BashComplete func(context *Context)
+	// An action to execute before any sub-subcommands are run, but after the context is ready
+	// If a non-nil error is returned, no sub-subcommands are run
+	Before func(context *Context) error
+	// An action to execute after any subcommands are run, once the subcommand has finished
+	// It is run even if Action() panics
+	After func(context *Context) error
+	// The function to call when this command is invoked
+	Action func(context *Context)
+	// List of child commands
+	Subcommands []Command
+	// List of flags to parse
+	Flags []Flag
+	// Treat all flags as normal arguments if true
+	SkipFlagParsing bool
+	// Boolean to hide built-in help command
+	HideHelp bool
+
+	// Full name of command for help, defaults to full command name, including parent commands.
+	HelpName        string
+	commandNamePath []string
+}
+
+// Returns the full name of the command.
+// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) error { + if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil { + return c.startApp(ctx) + } + + if !c.HideHelp && (HelpFlag != BoolFlag{}) { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + if ctx.App.EnableBashCompletion { + c.Flags = append(c.Flags, BashCompletionFlag) + } + + set := flagSet(c.Name, c.Flags) + set.SetOutput(ioutil.Discard) + + var err error + if !c.SkipFlagParsing { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) + } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } + } + + if err != nil { + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return nerr + } + context := NewContext(ctx.App, set, ctx) + + if checkCommandCompletions(context, c.Name) { + return nil + } + + if checkCommandHelp(context, c.Name) { + return nil + } + context.Command = c + c.Action(context) + return nil +} + +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) 
+}
+
+// Returns true if Command.Name, Command.ShortName or a command alias matches the given name
+func (c Command) HasName(name string) bool {
+	for _, n := range c.Names() {
+		if n == name {
+			return true
+		}
+	}
+	return false
+}
+
+func (c Command) startApp(ctx *Context) error {
+	app := NewApp()
+
+	// set the name and usage
+	app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
+	if c.HelpName != "" {
+		app.HelpName = c.HelpName
+	} else {
+		app.HelpName = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
+	}
+
+	if c.Description != "" {
+		app.Usage = c.Description
+	} else {
+		app.Usage = c.Usage
+	}
+
+	// set CommandNotFound
+	app.CommandNotFound = ctx.App.CommandNotFound
+
+	// set the flags and commands
+	app.Commands = c.Subcommands
+	app.Flags = c.Flags
+	app.HideHelp = c.HideHelp
+
+	app.Version = ctx.App.Version
+	app.HideVersion = ctx.App.HideVersion
+	app.Compiled = ctx.App.Compiled
+	app.Author = ctx.App.Author
+	app.Email = ctx.App.Email
+	app.Writer = ctx.App.Writer
+
+	// bash completion
+	app.EnableBashCompletion = ctx.App.EnableBashCompletion
+	if c.BashComplete != nil {
+		app.BashComplete = c.BashComplete
+	}
+
+	// set the actions
+	app.Before = c.Before
+	app.After = c.After
+	if c.Action != nil {
+		app.Action = c.Action
+	} else {
+		app.Action = helpSubcommand.Action
+	}
+
+	var newCmds []Command
+	for _, cc := range app.Commands {
+		cc.commandNamePath = []string{c.Name, cc.Name}
+		newCmds = append(newCmds, cc)
+	}
+	app.Commands = newCmds
+
+	return app.RunAsSubcommand(ctx)
+}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go b/Godeps/_workspace/src/github.com/codegangsta/cli/context.go
new file mode 100644
index 00000000..0513d34f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/context.go
@@ -0,0 +1,388 @@
+package cli
+
+import (
+	"errors"
+	"flag"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Context is a type that is passed through to
+// each Handler action in a cli application. Context
+// can be used to retrieve context-specific Args and
+// parsed command-line options.
+type Context struct {
+	App            *App
+	Command        Command
+	flagSet        *flag.FlagSet
+	setFlags       map[string]bool
+	globalSetFlags map[string]bool
+	parentContext  *Context
+}
+
+// Creates a new context. For use when invoking an App or Command action.
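+//
+// Application code normally receives a Context rather than constructing one;
+// a sketch (the "lang" flag name is illustrative):
+//
+//	app.Action = func(c *cli.Context) {
+//		println("lang is", c.String("lang"))
+//	}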
+func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + return &Context{App: app, flagSet: set, parentContext: parentCtx} +} + +// Looks up the value of a local int flag, returns 0 if no int flag exists +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// Looks up the value of a local bool flag, returns false if no bool flag exists +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// Looks up the value of a local boolT flag, returns false if no bool flag exists +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// Looks up the value of a local string flag, returns "" if no string flag exists +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// Looks up the value of a local string slice flag, returns nil if no string slice flag exists +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// Looks up the value of a local int slice flag, returns nil if no int slice flag exists +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// Looks up the value of a local generic flag, returns nil if no generic flag exists +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// Looks up the value of a global int flag, returns 0 if no int flag exists +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +// Looks up the value of a global bool flag, returns false if no bool flag exists +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +// Looks up the value of a global string flag, returns "" if no string flag exists +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +// Looks up the value of a global string slice flag, returns nil if no string slice flag exists +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +// Looks up the value of a global int slice flag, returns nil if no int slice flag exists +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +// Looks up the value of a global generic flag, returns nil if no generic flag exists +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +// Returns the number of 
flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + } + return c.setFlags[name] == true +} + +// Determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + if c.globalSetFlags == nil { + c.globalSetFlags = make(map[string]bool) + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { + ctx.flagSet.Visit(func(f *flag.Flag) { + c.globalSetFlags[f.Name] = true + }) + } + } + return c.globalSetFlags[name] +} + +// Returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, flag := range c.Command.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// Returns a slice of global flag names used by the app. +func (c *Context) GlobalFlagNames() (names []string) { + for _, flag := range c.App.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +type Args []string + +// Returns the command line arguments associated with the context. +func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// Returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// Returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Return the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + val, err := strconv.Atoi(f.Value.String()) + if err != nil { + return 0 + } + return val + } + + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + val, err := time.ParseDuration(f.Value.String()) + if err == nil { + return val + } + } + + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + val, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return val + } + + return 0 +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + return f.Value.String() + } + + return "" +} + +func lookupStringSlice(name string, set 
*flag.FlagSet) []string {
+	f := set.Lookup(name)
+	if f != nil {
+		return (f.Value.(*StringSlice)).Value()
+	}
+
+	return nil
+}
+
+func lookupIntSlice(name string, set *flag.FlagSet) []int {
+	f := set.Lookup(name)
+	if f != nil {
+		return (f.Value.(*IntSlice)).Value()
+	}
+
+	return nil
+}
+
+func lookupGeneric(name string, set *flag.FlagSet) interface{} {
+	f := set.Lookup(name)
+	if f != nil {
+		return f.Value
+	}
+	return nil
+}
+
+func lookupBool(name string, set *flag.FlagSet) bool {
+	f := set.Lookup(name)
+	if f != nil {
+		val, err := strconv.ParseBool(f.Value.String())
+		if err != nil {
+			return false
+		}
+		return val
+	}
+
+	return false
+}
+
+func lookupBoolT(name string, set *flag.FlagSet) bool {
+	f := set.Lookup(name)
+	if f != nil {
+		val, err := strconv.ParseBool(f.Value.String())
+		if err != nil {
+			return true
+		}
+		return val
+	}
+
+	return false
+}
+
+func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
+	switch ff.Value.(type) {
+	case *StringSlice:
+	default:
+		set.Set(name, ff.Value.String())
+	}
+}
+
+func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
+	visited := make(map[string]bool)
+	set.Visit(func(f *flag.Flag) {
+		visited[f.Name] = true
+	})
+	for _, f := range flags {
+		parts := strings.Split(f.GetName(), ",")
+		if len(parts) == 1 {
+			continue
+		}
+		var ff *flag.Flag
+		for _, name := range parts {
+			name = strings.Trim(name, " ")
+			if visited[name] {
+				if ff != nil {
+					return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
+				}
+				ff = set.Lookup(name)
+			}
+		}
+		if ff == nil {
+			continue
+		}
+		for _, name := range parts {
+			name = strings.Trim(name, " ")
+			if !visited[name] {
+				copyFlag(name, ff, set)
+			}
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go
new file mode 100644
index 00000000..49f30994
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go
@@ -0,0 +1,527 @@
+package cli
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// This flag enables bash-completion for all commands and subcommands
+var BashCompletionFlag = BoolFlag{
+	Name: "generate-bash-completion",
+}
+
+// This flag prints the version for the application
+var VersionFlag = BoolFlag{
+	Name:  "version, v",
+	Usage: "print the version",
+}
+
+// This flag prints the help for all commands and subcommands.
+// Set to the zero value (BoolFlag{}) to disable the flag -- the help
+// subcommand is still kept unless HideHelp is set to true.
+var HelpFlag = BoolFlag{
+	Name:  "help, h",
+	Usage: "show help",
+}
+
+// Flag is a common interface related to parsing flags in cli.
+// For more advanced flag parsing techniques, it is recommended that
+// this interface be implemented.
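+//
+// A minimal sketch of a custom flag (hypothetical type, not part of this
+// package; only the three required methods are shown):
+//
+//	type HostFlag struct{ Name, Usage string }
+//
+//	func (f HostFlag) String() string  { return "--" + f.Name + "\t" + f.Usage }
+//	func (f HostFlag) GetName() string { return f.Name }
+//	func (f HostFlag) Apply(set *flag.FlagSet) {
+//		set.String(f.Name, "localhost", f.Usage) // the "localhost" default is illustrative
+//	}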
+type Flag interface {
+	fmt.Stringer
+	// Apply Flag settings to the given flag set
+	Apply(*flag.FlagSet)
+	GetName() string
+}
+
+func flagSet(name string, flags []Flag) *flag.FlagSet {
+	set := flag.NewFlagSet(name, flag.ContinueOnError)
+
+	for _, f := range flags {
+		f.Apply(set)
+	}
+	return set
+}
+
+func eachName(longName string, fn func(string)) {
+	parts := strings.Split(longName, ",")
+	for _, name := range parts {
+		name = strings.Trim(name, " ")
+		fn(name)
+	}
+}
+
+// Generic is a generic parseable type identified by a specific flag
+type Generic interface {
+	Set(value string) error
+	String() string
+}
+
+// GenericFlag is the flag type for types implementing Generic
+type GenericFlag struct {
+	Name   string
+	Value  Generic
+	Usage  string
+	EnvVar string
+}
+
+// String returns the string representation of the generic flag to display the
+// help text to the user (uses the String() method of the generic flag to show
+// the value)
+func (f GenericFlag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage))
+}
+
+// Apply takes the flagset and calls Set on the generic flag with the value
+// provided by the user for parsing by the flag
+func (f GenericFlag) Apply(set *flag.FlagSet) {
+	val := f.Value
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				val.Set(envVal)
+				break
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		set.Var(f.Value, name, f.Usage)
+	})
+}
+
+func (f GenericFlag) GetName() string {
+	return f.Name
+}
+
+// StringSlice is an opaque type for []string to satisfy flag.Value
+type StringSlice []string
+
+// Set appends the string value to the list of values
+func (f *StringSlice) Set(value string) error {
+	*f = append(*f, value)
+	return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f *StringSlice) String() string {
+	return fmt.Sprintf("%s", *f)
+}
+
+// Value returns the slice of strings set by this flag
+func (f *StringSlice) Value() []string {
+	return *f
+}
+
+// StringSliceFlag is a string flag that can be specified multiple times on the
+// command-line
+type StringSliceFlag struct {
+	Name   string
+	Value  *StringSlice
+	Usage  string
+	EnvVar string
+}
+
+// String returns the usage
+func (f StringSliceFlag) String() string {
+	firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
+	pref := prefixFor(firstName)
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f StringSliceFlag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				newVal := &StringSlice{}
+				for _, s := range strings.Split(envVal, ",") {
+					s = strings.TrimSpace(s)
+					newVal.Set(s)
+				}
+				f.Value = newVal
+				break
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Value == nil {
+			f.Value = &StringSlice{}
+		}
+		set.Var(f.Value, name, f.Usage)
+	})
+}
+
+func (f StringSliceFlag) GetName() string {
+	return f.Name
+}
+
+// IntSlice is an opaque type for []int to satisfy flag.Value
+type IntSlice []int
+
+// Set parses the value into an integer and appends it to the list of values
+func (f *IntSlice) Set(value string) error {
+	tmp, err := strconv.Atoi(value)
+	if err != nil {
+		return err
+	}
+	*f = append(*f, tmp)
+	return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f *IntSlice) String() string {
+	return fmt.Sprintf("%d", *f)
+}
+
+// Value returns the slice of ints set by this flag
+func (f *IntSlice) Value() []int {
+	return *f
+}
+
+// IntSliceFlag is an int flag that can be specified multiple times on the
+// command-line
+type IntSliceFlag struct {
+	Name   string
+	Value  *IntSlice
+	Usage  string
+	EnvVar string
+}
+
+// String returns the usage
+func (f IntSliceFlag) String() string {
+	firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
+	pref := prefixFor(firstName)
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f IntSliceFlag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				newVal := &IntSlice{}
+				for _, s := range strings.Split(envVal, ",") {
+					s = strings.TrimSpace(s)
+					err := newVal.Set(s)
+					if err != nil {
+						fmt.Fprintln(os.Stderr, err.Error())
+					}
+				}
+				f.Value = newVal
+				break
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Value == nil {
+			f.Value = &IntSlice{}
+		}
+		set.Var(f.Value, name, f.Usage)
+	})
+}
+
+func (f IntSliceFlag) GetName() string {
+	return f.Name
+}
+
+// BoolFlag is a switch that defaults to false
+type BoolFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Destination *bool
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f BoolFlag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f BoolFlag) Apply(set *flag.FlagSet) {
+	val := false
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValBool, err := strconv.ParseBool(envVal)
+				if err == nil {
+					val = envValBool
+				}
+				break
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.BoolVar(f.Destination, name, val, f.Usage)
+			return
+		}
+		set.Bool(name, val, f.Usage)
+	})
+}
+
+func (f BoolFlag) GetName() string {
+	return f.Name
+}
+
+// BoolTFlag represents a boolean flag that is true by default, but can
+// still be set to false by --some-flag=false
+type BoolTFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Destination *bool
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f BoolTFlag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f BoolTFlag) Apply(set *flag.FlagSet) {
+	val := true
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValBool, err := strconv.ParseBool(envVal)
+				if err == nil {
+					val = envValBool
+					break
+				}
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.BoolVar(f.Destination, name, val, f.Usage)
+			return
+		}
+		set.Bool(name, val, f.Usage)
+	})
+}
+
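+// A usage sketch (illustrative, not upstream cli code): a BoolTFlag stays
+// true unless explicitly disabled, e.g. a hypothetical "color" flag:
+//
+//	app.Flags = []cli.Flag{
+//		cli.BoolTFlag{Name: "color", Usage: "colorize output", EnvVar: "APP_COLOR"},
+//	}
+//	// --color=false or APP_COLOR=false turns it off; read it via c.BoolT("color")
+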
+func (f BoolTFlag) GetName() string {
+	return f.Name
+}
+
+// StringFlag represents a flag that takes a string value
+type StringFlag struct {
+	Name        string
+	Value       string
+	Usage       string
+	EnvVar      string
+	Destination *string
+}
+
+// String returns the usage
+func (f StringFlag) String() string {
+	fmtString := "%s %v\t%v"
+	if len(f.Value) > 0 {
+		fmtString = "%s \"%v\"\t%v"
+	}
+
+	return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f StringFlag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				f.Value = envVal
+				break
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.StringVar(f.Destination, name, f.Value, f.Usage)
+			return
+		}
+		set.String(name, f.Value, f.Usage)
+	})
+}
+
+func (f StringFlag) GetName() string {
+	return f.Name
+}
+
+// IntFlag is a flag that takes an integer
+// Errors if the value provided cannot be parsed
+type IntFlag struct {
+	Name        string
+	Value       int
+	Usage       string
+	EnvVar      string
+	Destination *int
+}
+
+// String returns the usage
+func (f IntFlag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f IntFlag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValInt, err := strconv.ParseInt(envVal, 0, 64)
+				if err == nil {
+					f.Value = int(envValInt)
+					break
+				}
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.IntVar(f.Destination, name, f.Value, f.Usage)
+			return
+		}
+		set.Int(name, f.Value, f.Usage)
+	})
+}
+
+func (f IntFlag) GetName() string {
+	return f.Name
+}
+
+// DurationFlag is a flag that takes a duration specified in Go's duration
+// format: https://golang.org/pkg/time/#ParseDuration
+type DurationFlag struct {
+	Name        string
+	Value       time.Duration
+	Usage       string
+	EnvVar      string
+	Destination *time.Duration
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (f DurationFlag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f DurationFlag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValDuration, err := time.ParseDuration(envVal)
+				if err == nil {
+					f.Value = envValDuration
+					break
+				}
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.DurationVar(f.Destination, name, f.Value, f.Usage)
+			return
+		}
+		set.Duration(name, f.Value, f.Usage)
+	})
+}
+
+func (f DurationFlag) GetName() string {
+	return f.Name
+}
+
+// Float64Flag is a flag that takes a float value
+// Errors if the value provided cannot be parsed
+type Float64Flag struct {
+	Name        string
+	Value       float64
+	Usage       string
+	EnvVar      string
+	Destination *float64
+}
+
+// String returns the usage
+func (f Float64Flag) String() string {
+	return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
+}
+
+// Apply populates the flag given the flag set and environment
+func (f Float64Flag) Apply(set *flag.FlagSet) {
+	if f.EnvVar != "" {
+		for _, envVar := range strings.Split(f.EnvVar, ",") {
+			envVar = strings.TrimSpace(envVar)
+			if envVal := os.Getenv(envVar); envVal != "" {
+				envValFloat, err := strconv.ParseFloat(envVal, 64)
+				if err == nil {
+					f.Value = envValFloat
+					break
+				}
+			}
+		}
+	}
+
+	eachName(f.Name, func(name string) {
+		if f.Destination != nil {
+			set.Float64Var(f.Destination, name, f.Value, f.Usage)
+			return
+		}
+		set.Float64(name, f.Value, f.Usage)
+	})
+}
+
+func (f Float64Flag) GetName() string {
+	return f.Name
+}
+
+func prefixFor(name string) (prefix string) {
+	if len(name) == 1 {
+		prefix = "-"
+	} else {
+		prefix = "--"
+	}
+
+	return
+}
+
+func prefixedNames(fullName string) (prefixed string) {
+	parts := strings.Split(fullName, ",")
+	for i, name := range parts {
+		name = strings.Trim(name, " ")
+		prefixed += prefixFor(name) + name
+		if i < len(parts)-1 {
+			prefixed += ", "
+		}
+	}
+	return
+}
+
+func withEnvHint(envVar, str string) string {
+	envText := ""
+	if envVar != "" {
+		envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $"))
+	}
+	return str + envText
+}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/Godeps/_workspace/src/github.com/codegangsta/cli/help.go
new file mode 100644
index 00000000..a246f63a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/codegangsta/cli/help.go
@@ -0,0 +1,246 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+// The text template for the Default help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var AppHelpTemplate = `NAME:
+   {{.Name}} - {{.Usage}}
+
+USAGE:
+   {{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
+   {{if .Version}}
+VERSION:
+   {{.Version}}
+   {{end}}{{if len .Authors}}
+AUTHOR(S):
+   {{range .Authors}}{{ . }}{{end}}
+   {{end}}{{if .Commands}}
+COMMANDS:
+   {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
+   {{end}}{{end}}{{if .Flags}}
+GLOBAL OPTIONS:
+   {{range .Flags}}{{.}}
+   {{end}}{{end}}{{if .Copyright }}
+COPYRIGHT:
+   {{.Copyright}}
+   {{end}}
+`
+
+// The text template for the command help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var CommandHelpTemplate = `NAME:
+   {{.HelpName}} - {{.Usage}}
+
+USAGE:
+   {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}}
+
+DESCRIPTION:
+   {{.Description}}{{end}}{{if .Flags}}
+
+OPTIONS:
+   {{range .Flags}}{{.}}
+   {{end}}{{ end }}
+`
+
+// The text template for the subcommand help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + +COMMANDS: + {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} + {{end}}{{if .Flags}} +OPTIONS: + {{range .Flags}}{{.}} + {{end}}{{end}} +` + +var helpCommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) { + args := c.Args() + if args.Present() { + ShowCommandHelp(c, args.First()) + } else { + ShowAppHelp(c) + } + }, +} + +var helpSubcommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) { + args := c.Args() + if args.Present() { + ShowCommandHelp(c, args.First()) + } else { + ShowSubcommandHelp(c) + } + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +var HelpPrinter helpPrinter = printHelp + +// Prints version for the App +var VersionPrinter = printVersion + +func ShowAppHelp(c *Context) { + HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) +} + +// Prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + for _, command := range c.App.Commands { + for _, name := range command.Names() { + fmt.Fprintln(c.App.Writer, name) + } + } +} + +// Prints help for the given command +func ShowCommandHelp(ctx *Context, command string) { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + return + } + } + + if ctx.App.CommandNotFound != nil { + ctx.App.CommandNotFound(ctx, command) + } else { + fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) + } +} + +// Prints help for the given subcommand +func ShowSubcommandHelp(c *Context) { + ShowCommandHelp(c, c.Command.Name) +} + +// Prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// Prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// Prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil && c.BashComplete != nil { + c.BashComplete(ctx) + } +} + +func printHelp(out io.Writer, templ string, data interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + + w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + panic(err) + } + w.Flush() +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.Name != "" { + eachName(VersionFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.Name != "" { + eachName(HelpFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + 
return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.GlobalBool("h") || c.GlobalBool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkCompletions(c *Context) bool { + if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { + ShowCompletions(c) + return true + } + + return false +} + +func checkCommandCompletions(c *Context, name string) bool { + if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { + ShowCommandCompletions(c, name) + return true + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml b/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml new file mode 100644 index 00000000..d943e19f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml @@ -0,0 +1,38 @@ +image: dmp42/go:stable + +script: + # To be spoofed back into the test image + - go get github.com/modocache/gover + + - go get -t ./... + + # Go fmt + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + # Go lint + - test -z "$(golint ./... | tee /dev/stderr)" + # Go vet + - go vet ./... + # Go test + - go test -v -race -cover ./... + # Helper to concatenate reports + - gover + # Send to coverall + - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} + + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck + +notify: + email: + recipients: + - distribution@docker.com + + slack: + team: docker + channel: "#dt" + username: mom + token: {{SLACK_TOKEN}} + on_success: true + on_failure: true diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.gitignore b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore new file mode 100644 index 00000000..1c3ae0a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore @@ -0,0 +1,37 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. 
+*.sublime-project +*.sublime-workspace diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap new file mode 100644 index 00000000..191e60cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap @@ -0,0 +1,14 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS new file mode 100644 index 00000000..4b97cd78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS @@ -0,0 +1,104 @@ +Aaron Lehmann +Aaron Vinson +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +amitshukla +Amy Lindburg +Andrew Meredith +Andrey Kostov +Andy Goldstein +Anton Tiurin +Antonio Mercado +Arnaud Porterie +Arthur Baars +Avi Miller +Ayose Cazorla +BadZen +Ben Firshman +bin liu +Brian Bland +burnettk +Chris Dillon +Daisuke Fujita +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +David Verhasselt +David Xia +davidli +Dejan Golja +Derek McGowan +Diogo Mónica +Donald Huang +Doug Davis +farmerworking +Florentin Raud +Frederick F. Kautz IV +harche +Henri Gomez +Hu Keping +Hua Wang +Ian Babrou +Jack Griffin +Jason Freidman +Jeff Nickoloff +Jessie Frazelle +Jianqing Wang +Jon Poler +Jonathan Boulle +Jordan Liggitt +Josh Hawn +Julien Fernandez +Kelsey Hightower +Kenneth Lim +Li Yi +Louis Kottmann +Luke Carpenter +Mary Anthony +Matt Bentley +Matt Moore +Matt Robenolt +Michael Prokop +Miquel Sabaté +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Patrick Devine +Philip Misiowiec +Richard Scothern +Rusty Conover +Sebastiaan van Stijn +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +Spencer Rinehart +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Troels Thomsen +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King +xg.song +xiekeyang +Yann ROBERT +yuzou diff --git a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 00000000..1a9ecb74 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,140 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... 
+
+ - need help setting up your registry
+ - can't figure out something
+ - are not sure what's going on or what your problem is
+
+Then please do not open an issue here yet - you should first try one of the following support forums:
+
+ - irc: #docker-distribution on freenode
+ - mailing-list: distribution@dockerproject.org or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+
+## Reporting an issue properly
+
+By following these simple rules you will get better and faster feedback on your issue.
+
+ - search the bugtracker for an already reported issue
+
+### If you found an issue that describes your problem:
+
+ - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
+ - please refrain from adding "same thing here" or "+1" comments
+ - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
+ - comment if you have some new, technical and relevant information to add to the case
+ - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
+
+### If you have not found an existing issue that describes your problem:
+
+ 1. create a new issue, with a succinct title that describes your issue:
+   - bad title: "It doesn't work with my docker"
+   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
+ 2. copy the output of:
+   - `docker version`
+   - `docker info`
+   - `docker exec registry -version`
+ 3. copy the command line you used to launch your Registry
+ 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
+ 5. reproduce your problem and get your docker daemon logs showing the error
+ 6. if relevant, copy your registry logs that show the error
+ 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
+ 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
+
+## Contributing a patch for a known bug, or a small correction
+
+You should follow the basic GitHub workflow:
+
+ 1. fork
+ 2. commit a change
+ 3. make sure the tests pass
+ 4. PR
+
+Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
+
+ - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
+ - sign your commits using `-s`: `git commit -s -m "My commit"`
+
+Some simple rules to ensure quick merge:
+
+ - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
+ - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
+ - if you need to amend your PR following comments, please squash instead of adding more commits
+
+## Contributing new features
+
+You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
+
+If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
+If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
+
+Then you should submit your implementation, clearly linking to the issue (and the proposal, if any).
+
+Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
+
+It's mandatory to:
+
+ - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
+ - address maintainers' comments and modify your submission accordingly
+ - write tests for any new code
+
+Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
+
+Have a look at a great, successful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443)
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
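To make the comment and naming rules above concrete, here is a tiny hypothetical package (not from this repository) that follows rules 4 through 7:

```go
// Package fetch is a hypothetical illustration of the style rules above; it
// is not part of the registry code base.
package fetch

import "time"

// DefaultTimeout bounds a single registry round-trip. It exists because slow
// storage backends can otherwise stall clients indefinitely (rule 4: say why).
const DefaultTimeout = 30 * time.Second

// retryDelays is the backoff schedule between attempts. The short name is
// acceptable because the context is small (rule 6).
var retryDelays = []time.Duration{time.Second, 2 * time.Second, 4 * time.Second}
```

Note the package name has no underscore, and even the unexported variable carries a doc comment, per rules 5 and 7.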
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile new file mode 100644 index 00000000..5329cee7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile @@ -0,0 +1,19 @@ +FROM golang:1.5.3 + +RUN apt-get update && \ + apt-get install -y librados-dev apache2-utils && \ + rm -rf /var/lib/apt/lists/* + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH +ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +RUN make PREFIX=/go clean binaries + +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["/etc/docker/registry/config.yml"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json new file mode 100644 index 00000000..16100e21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json @@ -0,0 +1,181 @@ +{ + "ImportPath": "github.com/docker/distribution", + "GoVersion": "go1.4.2", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" + }, + { + "ImportPath": "google.golang.org/cloud", + "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" + }, + { + "ImportPath": "google.golang.org/api", + "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" + }, + { + "ImportPath": "google.golang.org/grpc", + "Rev": "91c8b79535eb6045d70ec671d302213f88a3ab95" + }, + { + "ImportPath": "github.com/bradfitz/http2", + "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f" + }, + { + "ImportPath": "github.com/golang/protobuf", + "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644" + }, + { + "ImportPath": "google.golang.org/cloud/storage", + "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" + }, + { + "ImportPath": "github.com/AdRoll/goamz/aws", + "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + }, + { + "ImportPath": "github.com/AdRoll/goamz/cloudfront", + "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + }, + { + "ImportPath": "github.com/AdRoll/goamz/s3", + "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/bugsnag/panicwrap", + "Rev": "e2c28503fcd0675329da73bf48b33404db873782" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/common", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + 
"ImportPath": "github.com/docker/libtrust", + "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" + }, + { + "ImportPath": "github.com/gorilla/handlers", + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/inconshreveable/mousetrap", + "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" + }, + { + "ImportPath": "github.com/noahdesu/go-ceph/rados", + "Comment": "v.0.3.0-29-gb15639c", + "Rev": "b15639c44c05368348355229070361395d9152ee" + }, + { + "ImportPath": "github.com/spf13/cobra", + "Rev": "312092086bed4968099259622145a0c9ae280064" + }, + { + "ImportPath": "github.com/spf13/pflag", + "Rev": "5644820622454e71517561946e3d94b9f9db6842" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/yvasiyarov/go-metrics", + "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" + }, + { + "ImportPath": "github.com/yvasiyarov/gorelic", + "Comment": "v0.0.6-8-ga9bba5b", + "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" + }, + { + "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", + "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" + }, + { + "ImportPath": "golang.org/x/net/trace", + "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" + } + ] +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/LICENSE b/Godeps/_workspace/src/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 00000000..bda40015 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,58 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/Makefile new file mode 100644 index 00000000..4604a39a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/Makefile @@ -0,0 +1,74 @@ +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +.PHONY: clean all fmt vet lint build test binaries +.DEFAULT: default +all: AUTHORS clean fmt vet fmt lint build test binaries + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry + +${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + +${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . 
-type f -name '*.go')
+	@echo "+ $@"
+	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
+
+docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
+	./bin/registry-api-descriptor-template $< > $@
+
+# Depends on binaries because vet will silently fail if it can't load compiled
+# imports
+vet: binaries
+	@echo "+ $@"
+	@go vet ./...
+
+fmt:
+	@echo "+ $@"
+	@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
+		echo "+ please format Go code with 'gofmt -s'"
+
+lint:
+	@echo "+ $@"
+	@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
+
+build:
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./...
+
+test:
+	@echo "+ $@"
+	@go test -test.short -tags "${DOCKER_BUILDTAGS}" ./...
+
+test-full:
+	@echo "+ $@"
+	@go test ./...
+
+binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
+	@echo "+ $@"
+
+clean:
+	@echo "+ $@"
+	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template"
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/Godeps/_workspace/src/github.com/docker/distribution/README.md
new file mode 100644
index 00000000..e8262133
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/README.md
@@ -0,0 +1,131 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused around security and performance.
+
+
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component**       |Description                                                                                                                                                                                          |
+|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                 |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                       |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry.                     |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of a V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content.
The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solution through good specs and a solid
+  extension mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](docs/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see
+[docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](docs/building.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+ - IRC: #docker-distribution on FreeNode
+ - Issue Tracker: github.com/docker/distribution/issues
+ - Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+ - Mailing List: docker@dockerproject.org
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE.md).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 00000000..9cdfa36c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the
+release-relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release-relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images
+- Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone. If a feature or
+bugfix is not part of a milestone, it is currently unscheduled for
+implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever-growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker.
Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality while
+keeping its core scope narrow.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from
+within the docker client to only mirror the official Docker Hub. This functionality
+can be expanded when image provenance has been specified and implemented in the
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes with the cost of consistency and high latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding on
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools and propose it as a standard search API, or use it to inform a
+standardization process.
Once this has been explored, we will integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and the approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally, both are supported. Generally, according to this rule,
+we err on the side of holding data longer than needed, ensuring that it is
+only removed when we can be certain that it can be removed. With the current
+behavior, we opt to hold onto the data forever, ensuring that data cannot be
+incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
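To make the garbage-collection framing concrete, here is a deliberately naive mark-and-sweep sketch over hypothetical manifest and blob types. It assumes a frozen, consistent snapshot of the store, which is precisely the assumption that the concurrent-push example above violates:

```go
package main

import "fmt"

// manifest is a hypothetical, minimal view of the data model: a manifest
// references blobs (layers) by digest.
type manifest struct{ layers []string }

// markAndSweep returns the digests no manifest references. It is only safe on
// a frozen snapshot: with concurrent pushes, a blob that looks unreferenced
// here may gain a reference before the delete lands.
func markAndSweep(blobs []string, manifests []manifest) []string {
	// Mark phase: record every digest reachable from some manifest.
	marked := map[string]bool{}
	for _, m := range manifests {
		for _, d := range m.layers {
			marked[d] = true
		}
	}
	// Sweep phase: anything unmarked is garbage.
	var garbage []string
	for _, d := range blobs {
		if !marked[d] {
			garbage = append(garbage, d)
		}
	}
	return garbage
}

func main() {
	blobs := []string{"sha256:aa", "sha256:bb", "sha256:cc"}
	manifests := []manifest{{layers: []string{"sha256:aa"}}}
	fmt.Println(markAndSweep(blobs, manifests)) // [sha256:bb sha256:cc]
}
```

The approaches below each add a different coordination mechanism around this same mark/sweep core.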
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: (1) maintaining a consistent consensus
+  of reference counts across a set of Registries and (2) building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second case.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments. This would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple but the amount of work in
+coordination may offset the extra work it might take to build a _Centralized
+Oracle_. We'll accept proposals for any solution but please coordinate with us
+before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap.
[Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
new file mode 100644
index 00000000..ce43ea2e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
@@ -0,0 +1,233 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+)
+
+var (
+	// ErrBlobExists is returned when a blob already exists.
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported is returned when the blob digest is an
+	// unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown is returned when a blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown is returned when an upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength is returned when the expected length of the blob
+	// on commit is mismatched with the descriptor or is an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest is returned when a digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// ErrBlobMounted is returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats
+	// are encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Many of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (i.e. sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked.
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods.
A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobIngester.Create and
+// BlobIngester.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteSeeker
+	io.ReaderFrom
+	io.Closer
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign "application/octet-
+	// stream" to the blob. The returned descriptor may have a different
+	// digest depending on the blob store, referred to as the canonical
+	// descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls, even after a commit, that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+
+	// Reader returns a reader to the blob being written by this BlobWriter.
+	Reader() (io.ReadCloser, error)
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml
new file mode 100644
index 00000000..e1995d4b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml
@@ -0,0 +1,90 @@
+# Pony-up!
+machine: + pre: + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + # Install ceph to test rados driver & create pool + - sudo -i ~/distribution/contrib/ceph/ci-setup.sh + - ceph osd pool create docker-distribution 1 + # Install codecov for coverage + - pip install --user codecov + + post: + # go + - gvm install go1.5.3 --prefer-binary --name=stable + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" + # Workaround Circle parsing dumb bugs and/or YAML wonkyness + CIRCLE_PAIN: "mode: set" + # Ceph config + RADOS_POOL: "docker-distribution" + + hosts: + # Not used yet + fancy: 127.0.0.1 + +dependencies: + pre: + # Copy the code to the gopath of all go versions + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + override: + # Install dependencies for every copied clone/go version + - gvm use stable && go get github.com/tools/godep: + pwd: $BASE_STABLE + + post: + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/golang/lint/golint + +test: + pre: + # Output the go versions we are going to test + # - gvm use old && go version + - gvm use stable && go version + + # First thing: build everything. This will catch compile errors, and it's + # also necessary for go vet to work properly (see #807). + - gvm use stable && godep go install ./...: + pwd: $BASE_STABLE + + # FMT + - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + pwd: $BASE_STABLE + + # VET + - gvm use stable && go vet ./...: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + pwd: $BASE_STABLE + + override: + # Test stable, and report + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 600 + pwd: $BASE_STABLE + + post: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): + pwd: $BASE_STABLE + + ## Notes + # Disabled the -race detector due to massive memory usage. + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" + # http://labix.org/gocheck diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go new file mode 100644 index 00000000..49426a88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go @@ -0,0 +1,97 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/version" +) + +var ( + algorithm = digest.Canonical + showVersion bool +) + +type job struct { + name string + reader io.Reader +} + +func init() { + flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") + flag.Var(&algorithm, "algorithm", "select the digest algorithm") + flag.BoolVar(&showVersion, "version", false, "show the version and exit") + + log.SetFlags(0) + log.SetPrefix(os.Args[0] + ": ") +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) + fmt.Fprintf(os.Stderr, ` +Calculate the digest of one or more input files, emitting the result +to standard out. If no files are provided, the digest of stdin will +be calculated. + +`) + flag.PrintDefaults() +} + +func unsupported() { + log.Fatalf("unsupported digest algorithm: %v", algorithm) +} + +func main() { + var jobs []job + + flag.Usage = usage + flag.Parse() + if showVersion { + version.PrintVersion() + return + } + + var fail bool // if we fail on one item, foul the exit code + if flag.NArg() > 0 { + for _, path := range flag.Args() { + fp, err := os.Open(path) + + if err != nil { + log.Printf("%s: %v", path, err) + fail = true + continue + } + defer fp.Close() + + jobs = append(jobs, job{name: path, reader: fp}) + } + } else { + // just read stdin + jobs = append(jobs, job{name: "-", reader: os.Stdin}) + } + + digestFn := algorithm.FromReader + + if !algorithm.Available() { + unsupported() + } + + for _, job := range jobs { + dgst, err := digestFn(job.reader) + if err != nil { + log.Printf("%s: %v", job.name, err) + fail = true + continue + } + + fmt.Printf("%v\t%s\n", dgst, job.name) + } + + if fail { + os.Exit(1) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go new file mode 100644 index 00000000..e9cbc42a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go @@ -0,0 +1,131 @@ +// registry-api-descriptor-template uses the APIDescriptor defined in the +// api/v2 package to execute templates passed to the command line. +// +// For example, to generate a new API specification, one would execute the +// following command from the repo root: +// +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md +// +// The templates are passed in the api/v2.APIDescriptor object. Please see the +// package documentation for fields available on that object. The template +// syntax is from Go's standard library text/template package. For information +// on Go's template syntax, please see golang.org/pkg/text/template. 
+package main + +import ( + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +var spaceRegex = regexp.MustCompile(`\n\s*`) + +func main() { + + if len(os.Args) != 2 { + log.Fatalln("please specify a template to execute.") + } + + path := os.Args[1] + filename := filepath.Base(path) + + funcMap := template.FuncMap{ + "removenewlines": func(s string) string { + return spaceRegex.ReplaceAllString(s, " ") + }, + "statustext": http.StatusText, + "prettygorilla": prettyGorillaMuxPath, + } + + tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) + + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), + // The following are part of the specification but provided by errcode default. + errcode.ErrorCodeUnauthorized.Descriptor(), + errcode.ErrorCodeDenied.Descriptor(), + errcode.ErrorCodeUnsupported.Descriptor()), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { + log.Fatalln(err) + } +} + +// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux +// route string, making it suitable for documentation. +func prettyGorillaMuxPath(s string) string { + // Stateful parser that removes regular expressions from gorilla + // routes. It correctly handles balanced bracket pairs. + + var output string + var label string + var level int + +start: + if s[0] == '{' { + s = s[1:] + level++ + goto capture + } + + output += string(s[0]) + s = s[1:] + + goto end +capture: + switch s[0] { + case '{': + level++ + case '}': + level-- + + if level == 0 { + s = s[1:] + goto label + } + case ':': + s = s[1:] + goto skip + default: + label += string(s[0]) + } + s = s[1:] + goto capture +skip: + switch s[0] { + case '{': + level++ + case '}': + level-- + } + s = s[1:] + + if level == 0 { + goto label + } + + goto skip +label: + if label != "" { + output += "<" + label + ">" + label = "" + } +end: + if s != "" { + goto start + } + + return output + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml new file mode 100644 index 00000000..7a274ea5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml @@ -0,0 +1,55 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development +storage: + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry-cache + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-8082 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password +health: + storagedriver: + enabled: true + interval: 10s + 
threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml new file mode 100644 index 00000000..b6438be5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml @@ -0,0 +1,66 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml new file mode 100644 index 00000000..b5700e19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml @@ -0,0 +1,18 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go new file mode 100644 index 00000000..603a44a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go @@ -0,0 +1,23 @@ +package main + +import ( + _ "net/http/pprof" + + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/auth/token" + _ "github.com/docker/distribution/registry/proxy" + _ "github.com/docker/distribution/registry/storage/driver/azure" + _ "github.com/docker/distribution/registry/storage/driver/filesystem" + _ "github.com/docker/distribution/registry/storage/driver/gcs" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/oss" + _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/swift" +) + +func main() { + registry.Cmd.Execute() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go new file mode 100644 index 
00000000..e7ea770a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go
@@ -0,0 +1,5 @@
+// +build include_rados
+
+package main
+
+import _ "github.com/docker/distribution/registry/storage/driver/rados"
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go
new file mode 100644
index 00000000..3dff32f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go
@@ -0,0 +1,571 @@
+package configuration
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
+// optionally modified by environment variables.
+//
+// Note that yaml field names should never include _ characters, since this is the separator used
+// in environment variable names.
+type Configuration struct {
+	// Version is the version which defines the format of the rest of the configuration
+	Version Version `yaml:"version"`
+
+	// Log supports setting various parameters related to the logging
+	// subsystem.
+	Log struct {
+		// Level is the granularity at which registry operations are logged.
+		Level Loglevel `yaml:"level"`
+
+		// Formatter overrides the default formatter with another. Options
+		// include "text", "json" and "logstash".
+		Formatter string `yaml:"formatter,omitempty"`
+
+		// Fields allows users to specify static string fields to include in
+		// the logger context.
+		Fields map[string]interface{} `yaml:"fields,omitempty"`
+
+		// Hooks allows users to configure the log hooks, enabling subsequent
+		// handling behavior when log messages at the defined levels are
+		// emitted.
+		Hooks []LogHook `yaml:"hooks,omitempty"`
+	}
+
+	// Loglevel is the level at which registry operations are logged. This is
+	// deprecated. Please use Log.Level in the future.
+	Loglevel Loglevel `yaml:"loglevel,omitempty"`
+
+	// Storage is the configuration for the registry's storage driver
+	Storage Storage `yaml:"storage"`
+
+	// Auth allows configuration of various authorization methods that may be
+	// used to gate requests.
+	Auth Auth `yaml:"auth,omitempty"`
+
+	// Middleware lists all middlewares to be used by the registry.
+	Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
+
+	// Reporting is the configuration for error reporting
+	Reporting Reporting `yaml:"reporting,omitempty"`
+
+	// HTTP contains configuration parameters for the registry's http
+	// interface.
+	HTTP struct {
+		// Addr specifies the bind address for the registry instance.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Net specifies the net portion of the bind address. A default empty value means tcp.
+		Net string `yaml:"net,omitempty"`
+
+		// Host specifies an externally-reachable address for the registry, as a fully
+		// qualified URL.
+		Host string `yaml:"host,omitempty"`
+
+		Prefix string `yaml:"prefix,omitempty"`
+
+		// Secret specifies the secret key with which HMAC tokens are created.
+		Secret string `yaml:"secret,omitempty"`
+
+		// TLS instructs the http server to listen with a TLS configuration.
+		// This only supports simple tls configuration with a cert and key.
+		// Mostly, this is useful for testing situations or simple deployments
+		// that require tls. If more complex configurations are required, use
+		// a proxy or make a proposal to add support here.
+		TLS struct {
+			// Certificate specifies the path to an x509 certificate file to
+			// be used for TLS.
+			Certificate string `yaml:"certificate,omitempty"`
+
+			// Key specifies the path to the x509 key file, which should
+			// contain the private portion for the file specified in
+			// Certificate.
+			Key string `yaml:"key,omitempty"`
+
+			// ClientCAs specifies the CA certs for client authentication.
+			// A file may contain multiple CA certificates encoded as PEM.
+			ClientCAs []string `yaml:"clientcas,omitempty"`
+		} `yaml:"tls,omitempty"`
+
+		// Headers is a set of headers to include in HTTP responses. A common
+		// use case for this would be security headers such as
+		// Strict-Transport-Security. The map keys are the header names, and
+		// the values are the associated header payloads.
+		Headers http.Header `yaml:"headers,omitempty"`
+
+		// Debug configures the http debug interface, if specified. This can
+		// include services such as pprof, expvar and other data that should
+		// not be exposed externally. Left disabled by default.
+		Debug struct {
+			// Addr specifies the bind address for the debug server.
+			Addr string `yaml:"addr,omitempty"`
+		} `yaml:"debug,omitempty"`
+	} `yaml:"http,omitempty"`
+
+	// Notifications specifies configuration about the various endpoints to
+	// which registry events are dispatched.
+	Notifications Notifications `yaml:"notifications,omitempty"`
+
+	// Redis configures the redis pool available to the registry webapp.
+	Redis struct {
+		// Addr specifies the redis instance available to the application.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Password string to use when making a connection.
+		Password string `yaml:"password,omitempty"`
+
+		// DB specifies the database to connect to on the redis instance.
+		DB int `yaml:"db,omitempty"`
+
+		DialTimeout  time.Duration `yaml:"dialtimeout,omitempty"`  // timeout for connect
+		ReadTimeout  time.Duration `yaml:"readtimeout,omitempty"`  // timeout for reads of data
+		WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
+
+		// Pool configures the behavior of the redis connection pool.
+		Pool struct {
+			// MaxIdle sets the maximum number of idle connections.
+			MaxIdle int `yaml:"maxidle,omitempty"`
+
+			// MaxActive sets the maximum number of connections that should be
+			// opened before blocking a connection request.
+			MaxActive int `yaml:"maxactive,omitempty"`
+
+			// IdleTimeout sets the amount of time to wait before closing
+			// inactive connections.
+			IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
+		} `yaml:"pool,omitempty"`
+	} `yaml:"redis,omitempty"`
+
+	Health Health `yaml:"health,omitempty"`
+
+	Proxy Proxy `yaml:"proxy,omitempty"`
+}
+
+// LogHook is composed of a hook Level and Type. Once hooks are configured,
+// subsequent handling can be executed automatically when log messages at the
+// defined levels are emitted. For example, a hook can send an email
+// notification when an error is logged in the app.
+type LogHook struct {
+	// Disabled lets the user enable or disable the hook.
+	Disabled bool `yaml:"disabled,omitempty"`
+
+	// Type allows the user to select which type of hook handler they want.
+	Type string `yaml:"type,omitempty"`
+
+	// Levels sets which levels of log message will trigger the hook.
+	Levels []string `yaml:"levels,omitempty"`
+
+	// MailOptions allows the user to configure email parameters.
+	MailOptions MailOptions `yaml:"options,omitempty"`
+}
+
+// MailOptions provides the configuration section for the mail hook handler.
+type MailOptions struct {
+	SMTP struct {
+		// Addr defines the smtp host address.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Username defines the user name used to log in to the smtp host.
+		Username string `yaml:"username,omitempty"`
+
+		// Password defines the password of the login user.
+		Password string `yaml:"password,omitempty"`
+
+		// Insecure defines if the smtp login skips certificate verification.
+		Insecure bool `yaml:"insecure,omitempty"`
+	} `yaml:"smtp,omitempty"`
+
+	// From defines the mail sending address.
+	From string `yaml:"from,omitempty"`
+
+	// To defines the mail receiving addresses.
+	To []string `yaml:"to,omitempty"`
+}
+
+// FileChecker is a type of entry in the health section for checking files.
+type FileChecker struct {
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// File is the path to check
+	File string `yaml:"file,omitempty"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
+type HTTPChecker struct {
+	// Timeout is the duration to wait before timing out the HTTP request
+	Timeout time.Duration `yaml:"timeout,omitempty"`
+	// StatusCode is the expected status code
+	StatusCode int
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// URI is the HTTP URI to check
+	URI string `yaml:"uri,omitempty"`
+	// Headers lists static headers that should be added to all requests
+	Headers http.Header `yaml:"headers"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// TCPChecker is a type of entry in the health section for checking TCP servers.
+type TCPChecker struct {
+	// Timeout is the duration to wait before timing out the TCP connection
+	Timeout time.Duration `yaml:"timeout,omitempty"`
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// Addr is the TCP address to check
+	Addr string `yaml:"addr,omitempty"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// Health provides the configuration section for health checks.
+type Health struct {
+	// FileCheckers is a list of paths to check
+	FileCheckers []FileChecker `yaml:"file,omitempty"`
+	// HTTPCheckers is a list of URIs to check
+	HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
+	// TCPCheckers is a list of TCP addresses to check
+	TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
+	// StorageDriver configures a health check on the configured storage
+	// driver
+	StorageDriver struct {
+		// Enabled turns on the health check for the storage driver
+		Enabled bool `yaml:"enabled,omitempty"`
+		// Interval is the duration in between checks
+		Interval time.Duration `yaml:"interval,omitempty"`
+		// Threshold is the number of times a check must fail to trigger an
+		// unhealthy state
+		Threshold int `yaml:"threshold,omitempty"`
+	} `yaml:"storagedriver,omitempty"`
+}
+
+// v0_1Configuration is a Version 0.1 Configuration struct
+// This is currently aliased to Configuration, as it is the current version
+type v0_1Configuration Configuration
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints
+func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var versionString string
+	err := unmarshal(&versionString)
+	if err != nil {
+		return err
+	}
+
+	newVersion := Version(versionString)
+	if _, err := newVersion.major(); err != nil {
+		return err
+	}
+
+	if _, err := newVersion.minor(); err != nil {
+		return err
+	}
+
+	*version = newVersion
+	return nil
+}
+
+// CurrentVersion is the most recent Version that can be parsed
+var CurrentVersion = MajorMinorVersion(0, 1)
+
+// Loglevel is the level at which operations are logged
+// This can be error, warn, info, or debug
+type Loglevel string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
+// valid loglevel
+func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var loglevelString string
+	err := unmarshal(&loglevelString)
+	if err != nil {
+		return err
+	}
+
+	loglevelString = strings.ToLower(loglevelString)
+	switch loglevelString {
+	case "error", "warn", "info", "debug":
+	default:
+		return fmt.Errorf("Invalid loglevel %s. Must be one of [error, warn, info, debug]", loglevelString)
+	}
+
+	*loglevel = Loglevel(loglevelString)
+	return nil
+}
+
+// Parameters defines a key-value parameters mapping
+type Parameters map[string]interface{}
+
+// Storage defines the configuration for registry object storage
+type Storage map[string]Parameters
+
+// Type returns the storage driver type, such as filesystem or s3
+func (storage Storage) Type() string {
+	var storageType []string
+
+	// Return the only non-reserved key in this map
+	for k := range storage {
+		switch k {
+		case "maintenance":
+			// allow configuration of maintenance
+		case "cache":
+			// allow configuration of caching
+		case "delete":
+			// allow configuration of delete
+		case "redirect":
+			// allow configuration of redirect
+		default:
+			storageType = append(storageType, k)
+		}
+	}
+	if len(storageType) > 1 {
+		panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
+	}
+	if len(storageType) == 1 {
+		return storageType[0]
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for a Storage configuration
+func (storage Storage) Parameters() Parameters {
+	return storage[storage.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (storage Storage) setParameter(key string, value interface{}) {
+	storage[storage.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
+func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var storageMap map[string]Parameters
+	err := unmarshal(&storageMap)
+	if err == nil {
+		if len(storageMap) > 1 {
+			types := make([]string, 0, len(storageMap))
+			for k := range storageMap {
+				switch k {
+				case "maintenance":
+					// allow for configuration of maintenance
+				case "cache":
+					// allow configuration of caching
+				case "delete":
+					// allow configuration of delete
+				case "redirect":
+					// allow configuration of redirect
+				default:
+					types = append(types, k)
+				}
+			}
+
+			if len(types) > 1 {
+				return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
+			}
+		}
+		*storage = storageMap
+		return nil
+	}
+
+	var storageType string
+	err = unmarshal(&storageType)
+	if err == nil {
+		*storage = Storage{storageType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (storage Storage) MarshalYAML() (interface{}, error) {
+	if storage.Parameters() == nil {
+		return storage.Type(), nil
+	}
+	return map[string]Parameters(storage), nil
+}
+
+// Auth defines the configuration for registry authorization.
+type Auth map[string]Parameters
+
+// Type returns the auth type, such as htpasswd or token
+func (auth Auth) Type() string {
+	// Return the only key in this map
+	for k := range auth {
+		return k
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for an Auth configuration
+func (auth Auth) Parameters() Parameters {
+	return auth[auth.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (auth Auth) setParameter(key string, value interface{}) {
+	auth[auth.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
+func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]Parameters
+	err := unmarshal(&m)
+	if err == nil {
+		if len(m) > 1 {
+			types := make([]string, 0, len(m))
+			for k := range m {
+				types = append(types, k)
+			}
+
+			// TODO(stevvooe): May want to change this slightly for
+			// authorization to allow multiple challenges.
+			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
+
+		}
+		*auth = m
+		return nil
+	}
+
+	var authType string
+	err = unmarshal(&authType)
+	if err == nil {
+		*auth = Auth{authType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (auth Auth) MarshalYAML() (interface{}, error) {
+	if auth.Parameters() == nil {
+		return auth.Type(), nil
+	}
+	return map[string]Parameters(auth), nil
+}
+
+// Notifications configures multiple http endpoints.
+type Notifications struct {
+	// Endpoints is a list of http configurations for endpoints that
+	// respond to webhook notifications. In the future, we may allow other
+	// kinds of endpoints, such as external queues.
+	Endpoints []Endpoint `yaml:"endpoints,omitempty"`
+}
+
+// Endpoint describes the configuration of an http webhook notification
+// endpoint.
+type Endpoint struct {
+	Name string `yaml:"name"` // identifies the endpoint in the registry instance.
+ Disabled bool `yaml:"disabled"` // disables the endpoint + URL string `yaml:"url"` // post url for the endpoint. + Headers http.Header `yaml:"headers"` // static headers that should be added to all requests + Timeout time.Duration `yaml:"timeout"` // HTTP timeout + Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure + Backoff time.Duration `yaml:"backoff"` // backoff duration +} + +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). + Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey,omitempty"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. +type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, fmt.Errorf("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return 
config, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go
new file mode 100644
index 00000000..8b81dd5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go
@@ -0,0 +1,283 @@
+package configuration
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"gopkg.in/yaml.v2"
+)
+
+// Version is a major/minor version pair of the form Major.Minor
+// Major version upgrades indicate structure or type changes
+// Minor version upgrades should be strictly additive
+type Version string
+
+// MajorMinorVersion constructs a Version from its Major and Minor components
+func MajorMinorVersion(major, minor uint) Version {
+	return Version(fmt.Sprintf("%d.%d", major, minor))
+}
+
+func (version Version) major() (uint, error) {
+	majorPart := strings.Split(string(version), ".")[0]
+	major, err := strconv.ParseUint(majorPart, 10, 0)
+	return uint(major), err
+}
+
+// Major returns the major version portion of a Version
+func (version Version) Major() uint {
+	major, _ := version.major()
+	return major
+}
+
+func (version Version) minor() (uint, error) {
+	minorPart := strings.Split(string(version), ".")[1]
+	minor, err := strconv.ParseUint(minorPart, 10, 0)
+	return uint(minor), err
+}
+
+// Minor returns the minor version portion of a Version
+func (version Version) Minor() uint {
+	minor, _ := version.minor()
+	return minor
+}
+
+// VersionedParseInfo defines how a specific version of a configuration should
+// be parsed into the current version
+type VersionedParseInfo struct {
+	// Version is the version which this parsing information relates to
+	Version Version
+	// ParseAs defines the type which a configuration file of this version
+	// should be parsed into
+	ParseAs reflect.Type
+	// ConversionFunc defines a method for converting the parsed configuration
+	// (of type ParseAs) into the current configuration version
+	// Note: this method signature is very unclear with the absence of generics
+	ConversionFunc func(interface{}) (interface{}, error)
+}
+
+type envVar struct {
+	name  string
+	value string
+}
+
+type envVars []envVar
+
+func (a envVars) Len() int           { return len(a) }
+func (a envVars) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name }
+
+// Parser can be used to parse a configuration file and environment of a defined
+// version into a unified output structure
+type Parser struct {
+	prefix  string
+	mapping map[Version]VersionedParseInfo
+	env     envVars
+}
+
+// NewParser returns a *Parser with the given environment prefix which handles
+// versioned configurations which match the given parseInfos
+func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
+	p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)}
+
+	for _, parseInfo := range parseInfos {
+		p.mapping[parseInfo.Version] = parseInfo
+	}
+
+	for _, env := range os.Environ() {
+		envParts := strings.SplitN(env, "=", 2)
+		p.env = append(p.env, envVar{envParts[0], envParts[1]})
+	}
+
+	// We must sort the environment variables lexically by name so that
+	// less specific variables are applied before more specific ones
+	// (i.e. REGISTRY_STORAGE before
+	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY).
This sucks, but it's a + // lot simpler and easier to get right than unmarshalling map entries + // into temporaries and merging with the existing entry. + sort.Sort(p.env) + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + for _, envVar := range p.env { + pathStr := envVar.name + if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { + path := strings.Split(pathStr, "_") + + err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) + if err != nil { + return err + } + } + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +// overwriteFields replaces configuration values with alternate values specified +// through the environment. Precondition: an empty path slice must never be +// passed in. +func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + panic("encountered nil pointer while handling environment variable " + fullpath) + } + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + return p.overwriteStruct(v, fullpath, path, payload) + case reflect.Map: + return p.overwriteMap(v, fullpath, path, payload) + case reflect.Interface: + if v.NumMethod() == 0 { + if !v.IsNil() { + return p.overwriteFields(v.Elem(), fullpath, path, payload) + } + // Interface was empty; create an implicit map + var template map[string]interface{} + wrappedV := reflect.MakeMap(reflect.TypeOf(template)) + v.Set(wrappedV) + return p.overwriteMap(wrappedV, fullpath, path, payload) + } + } + return nil +} + +func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { + // Generate case-insensitive map of struct fields + byUpperCase := make(map[string]int) + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + } + byUpperCase[upper] = i + } + + fieldIndex, present := byUpperCase[path[0]] + if !present { + logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) + return nil + } + field := v.Field(fieldIndex) + sf := v.Type().Field(fieldIndex) + + if len(path) == 1 { + // Env var specifies this field directly + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) + if err != nil { + return err + } + field.Set(reflect.Indirect(fieldVal)) + return nil + } + + // If the field is nil, must create an object + switch sf.Type.Kind() { + case reflect.Map: + if field.IsNil() { + 
field.Set(reflect.MakeMap(sf.Type))
+		}
+	case reflect.Ptr:
+		if field.IsNil() {
+			field.Set(reflect.New(sf.Type))
+		}
+	}
+
+	err := p.overwriteFields(field, fullpath, path[1:], payload)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error {
+	if m.Type().Key().Kind() != reflect.String {
+		// non-string keys unsupported
+		logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath)
+		return nil
+	}
+
+	if len(path) > 1 {
+		// If a matching key exists, get its value and continue the
+		// overwriting process.
+		for _, k := range m.MapKeys() {
+			if strings.ToUpper(k.String()) == path[0] {
+				mapValue := m.MapIndex(k)
+				// If the existing value is nil, we want to
+				// recreate it instead of using this value.
+				if (mapValue.Kind() == reflect.Ptr ||
+					mapValue.Kind() == reflect.Interface ||
+					mapValue.Kind() == reflect.Map) &&
+					mapValue.IsNil() {
+					break
+				}
+				return p.overwriteFields(mapValue, fullpath, path[1:], payload)
+			}
+		}
+	}
+
+	// (Re)create this key
+	var mapValue reflect.Value
+	if m.Type().Elem().Kind() == reflect.Map {
+		mapValue = reflect.MakeMap(m.Type().Elem())
+	} else {
+		mapValue = reflect.New(m.Type().Elem())
+	}
+	if len(path) > 1 {
+		err := p.overwriteFields(mapValue, fullpath, path[1:], payload)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := yaml.Unmarshal([]byte(payload), mapValue.Interface())
+		if err != nil {
+			return err
+		}
+	}
+
+	m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue))
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go
new file mode 100644
index 00000000..23cbf5b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go
@@ -0,0 +1,85 @@
+package context
+
+import (
+	"sync"
+
+	"github.com/docker/distribution/uuid"
+	"golang.org/x/net/context"
+)
+
+// Context is a copy of Context from the golang.org/x/net/context package.
+type Context interface {
+	context.Context
+}
+
+// instanceContext is a context that provides only an instance id. It is
+// provided as the main background context.
+type instanceContext struct {
+	Context
+	id   string    // id of context, logged as "instance.id"
+	once sync.Once // once protects generation of the id
+}
+
+func (ic *instanceContext) Value(key interface{}) interface{} {
+	if key == "instance.id" {
+		ic.once.Do(func() {
+			// We want to lazily initialize the UUID so that we don't
+			// call a random generator from the package initialization
+			// code. For various reasons, random could not be available
+			// https://github.com/docker/distribution/issues/782
+			ic.id = uuid.Generate().String()
+		})
+		return ic.id
+	}
+
+	return ic.Context.Value(key)
+}
+
+var background = &instanceContext{
+	Context: context.Background(),
+}
+
+// Background returns a non-nil, empty Context. The background context
+// provides a single key, "instance.id" that is globally unique to the
+// process.
+func Background() Context {
+	return background
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val. Use context Values only for request-scoped data that transits processes
+// and APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}
+
+// stringMapContext is a simple context implementation that checks a map for a
+// key, falling back to a parent if not present.
+type stringMapContext struct {
+	context.Context
+	m map[string]interface{}
+}
+
+// WithValues returns a context that proxies lookups through a map. Only
+// supports string keys.
+func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
+	mo := make(map[string]interface{}, len(m)) // make our own copy.
+	for k, v := range m {
+		mo[k] = v
+	}
+
+	return stringMapContext{
+		Context: ctx,
+		m:       mo,
+	}
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+	if ks, ok := key.(string); ok {
+		if v, ok := smc.m[ks]; ok {
+			return v
+		}
+	}
+
+	return smc.Context.Value(key)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go
new file mode 100644
index 00000000..6fe1f817
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go
@@ -0,0 +1,89 @@
+// Package context provides several utilities for working with
+// golang.org/x/net/context in http requests. Primarily, the focus is on
+// logging relevant request information, but this package is not limited to
+// that purpose.
+//
+// The easiest way to get started is to get the background context:
+//
+// 	ctx := context.Background()
+//
+// The returned context should be passed around your application and be the
+// root of all other context instances. If the application has a version, this
+// line should be called before anything else:
+//
+// 	ctx := context.WithVersion(context.Background(), version)
+//
+// The above will store the version in the context and will be available to
+// the logger.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+// 	GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+// 	ctx := context.WithValue(context.Background(), "version", version)
+// 	GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+// 	INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+// 	GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id.
Using the
+// context above, in our request-scoped function, we place another logger in
+// the context:
+//
+// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+// 	INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context; the previous context, with the
+// version field, can still be used independently. Put another way, the new
+// logger, added to the request context, is unique to that context and can
+// have request-scoped variables.
+//
+// HTTP Requests
+//
+// This package also contains several methods for working with http requests.
+// The concepts are very similar to those described above. We simply place the
+// request in the context using WithRequest. This makes the request variables
+// available. GetRequestLogger can then be called to get request-specific
+// variables in a log line:
+//
+// 	ctx = WithRequest(ctx, req)
+// 	GetRequestLogger(ctx).Infof("request variables")
+//
+// Like above, if we want to include the request data in all log messages in
+// the context, we push the logger to a new context and use that one:
+//
+// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
+//
+// The concept is fairly powerful and ensures that calls throughout the stack
+// can be traced in log messages. Using fields like "http.request.id", one
+// can analyze call flow for a particular request with a simple grep of the
+// logs.
+package context
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go
new file mode 100644
index 00000000..2cb1d041
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go
@@ -0,0 +1,364 @@
+package context
+
+import (
+	"errors"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/uuid"
+	"github.com/gorilla/mux"
+)
+
+// Common errors used with this package.
+var (
+	ErrNoRequestContext        = errors.New("no http request in context")
+	ErrNoResponseWriterContext = errors.New("no http response in context")
+)
+
+func parseIP(ipStr string) net.IP {
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		log.Warnf("invalid remote IP address: %q", ipStr)
+	}
+	return ip
+}
+
+// RemoteAddr extracts the remote address of the request, taking into
+// account proxy headers.
+func RemoteAddr(r *http.Request) string {
+	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
+		proxies := strings.Split(prior, ",")
+		if len(proxies) > 0 {
+			remoteAddr := strings.Trim(proxies[0], " ")
+			if parseIP(remoteAddr) != nil {
+				return remoteAddr
+			}
+		}
+	}
+	// X-Real-Ip is less supported, but worth checking in the
+	// absence of X-Forwarded-For
+	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
+		if parseIP(realIP) != nil {
+			return realIP
+		}
+	}
+
+	return r.RemoteAddr
+}
+
+// RemoteIP extracts the remote IP of the request, taking into
+// account proxy headers.
+func RemoteIP(r *http.Request) string {
+	addr := RemoteAddr(r)
+
+	// Try parsing it as "IP:port"
+	if ip, _, err := net.SplitHostPort(addr); err == nil {
+		return ip
+	}
+
+	return addr
+}
+
+// WithRequest places the request on the context.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go new file mode 100644 index 00000000..2cb1d041 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -0,0 +1,364 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// empty string is returned if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: irw, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest lets us change the request vars implementation for +// testing and possible future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +}
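A sketch of `WithVars` inside a handler (editorial illustration; the route and port are arbitrary placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/context"
	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	router.HandleFunc("/v2/{name}/tags/list", func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithVars(context.Background(), r)

		// "vars.name" resolves to mux.Vars(r)["name"]; the handler does not
		// need to know gorilla/mux sits underneath.
		fmt.Fprintln(w, ctx.Value("vars.name"))
	})
	http.ListenAndServe(":8080", router)
}
```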
+ +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "http.request". For other components, access them +// as "http.request.<component>". For example, "http.request.uri" yields +// r.RequestURI. +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set.
+ if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + } + + return irw.instrumentedResponseWriter.Value(key) +}
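Taken together, the request and response helpers above compose into simple logging middleware; a minimal sketch, assuming only the exported API of this file (the handler and port are placeholders):

```go
package main

import (
	"net/http"

	"github.com/docker/distribution/context"
)

func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithRequest(context.Background(), r)
		ctx, w = context.WithResponseWriter(ctx, w)

		// http.request.id, http.request.method, etc. now resolve on the context.
		context.GetRequestLogger(ctx).Infof("request started")

		next.ServeHTTP(w, r)

		// Read response stats only after the handler has written.
		context.GetResponseLogger(ctx).Infof("request completed")
	})
}

func main() {
	http.ListenAndServe(":8080", logging(http.NotFoundHandler()))
}
```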
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go new file mode 100644 index 00000000..fbb6a051 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go @@ -0,0 +1,116 @@ +package context + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "runtime" +) + +// Logger provides a leveled-logging interface. +type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) +} + +// WithLogger creates a new context with the provided logger. +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from map[interface{}]interface{} to logrus.Fields, which is + // map[string]interface{}, for logrus. + lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// it's recommended that a String method is implemented. +func GetLogger(ctx Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// getLogrusLogger returns the logrus logger for the context. If one or more +// keys are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value("logger") + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() + // If no logger is found, just return the standard logger. + logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +}
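A small sketch of how keys are resolved and named via fmt.Sprint (editorial illustration; the instance.id value and path field are arbitrary):

```go
package main

import "github.com/docker/distribution/context"

func main() {
	ctx := context.WithValue(context.Background(), "instance.id", "1234")

	// The key is looked up on the context and reported as instance.id=1234.
	context.GetLogger(ctx, "instance.id").Warnf("disk space low")

	// One-off fields that should not live on the context:
	context.GetLoggerWithField(ctx, "path", "/var/lib/registry").Infof("scanning")
}
```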
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go new file mode 100644 index 00000000..af4f1351 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -0,0 +1,104 @@ +package context + +import ( + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// Dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved along with the +// package, and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go new file mode 100644 index 00000000..299edc00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -0,0 +1,32 @@ +package context + +import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + startedAtI := ctx.Value(key) + if startedAtI != nil { + if startedAt, ok := startedAtI.(time.Time); ok { + return time.Since(startedAt) + } + } + + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. +func GetStringValue(ctx Context, key interface{}) (value string) { + stringi := ctx.Value(key) + if stringi != nil { + if valuev, ok := stringi.(string); ok { + value = valuev + } + } + + return value +}
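For illustration, `Since` pairs naturally with a stored start time (editorial sketch; the key name is arbitrary):

```go
package main

import (
	"time"

	"github.com/docker/distribution/context"
)

func main() {
	ctx := context.WithValue(context.Background(), "op.startedat", time.Now())

	time.Sleep(10 * time.Millisecond)

	// Zero is returned if the key is missing or not a time.Time.
	context.GetLogger(ctx).Infof("took %v", context.Since(ctx, "op.startedat"))
}
```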
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/version.go b/Godeps/_workspace/src/github.com/docker/distribution/context/version.go new file mode 100644 index 00000000..746cda02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/version.go @@ -0,0 +1,16 @@ +package context + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx Context, version string) Context { + ctx = WithValue(ctx, "version", version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, "version")) +} + +// GetVersion returns the application version from the context. An empty +// string may be returned if the version was not set on the context. +func GetVersion(ctx Context) string { + return GetStringValue(ctx, "version") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD new file mode 100644 index 00000000..29f6bae1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD @@ -0,0 +1,36 @@ +# Apache HTTPd sample for Registry v1, v2 and mirror + +Three containers are involved: + +* Docker Registry v1 (registry 0.9.1) +* Docker Registry v2 (registry 2.0.0) +* Docker Registry v1 in mirror mode + +HTTP is used for the mirror, HTTPS for v1 & v2: + +* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode +* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode + +## 3 Docker containers should be started + +* Docker Registry 1.0 in Mirror mode : port 5001 +* Docker Registry 1.0 in Hosting mode : port 5000 +* Docker Registry 2.0 in Hosting mode : port 5002 + +### Registry v1 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1 + +### Mirror + + docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ + -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1 + +### Registry v2 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2 + +# For Hosting mode access + +* users must have an account (valid-user) to be able to fetch images +* only users using the docker-deployer account are allowed to push images
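A minimal sketch, in Go, of checking the HTTPS vhost this sample sets up (assumes the sample's self-signed certificate and hostname; the expected header value comes from the apache.conf that follows):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// The sample uses a self-signed certificate; skip verification for this
	// check only, never in production code.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	resp, err := client.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The vhost sets this header for the v2 registry: expect "registry/2.0".
	fmt.Println(resp.Header.Get("Docker-Distribution-Api-Version"))
}
```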
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf new file mode 100644 index 00000000..3300a7c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf @@ -0,0 +1,127 @@ +# +# Sample Apache 2.x configuration where : +# + +<VirtualHost *:80> + + ServerName registry.example.com + ServerAlias www.registry.example.com + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + ProxyPass /_ping http://localhost:5001/_ping + ProxyPassReverse /_ping http://localhost:5001/_ping + + ProxyPass /v1 http://localhost:5001/v1 + ProxyPassReverse /v1 http://localhost:5001/v1 + + # Logs + ErrorLog ${APACHE_LOG_DIR}/mirror_error_log + CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog + +</VirtualHost> + +<VirtualHost *:443> + + ServerName registry.example.com + ServerAlias www.registry.example.com + + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt + SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key + + # Higher Strength SSL Ciphers + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLCipherSuite RC4-SHA:HIGH + SSLHonorCipherOrder on + + # Logs + ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log + CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog + + Header always set "Docker-Distribution-Api-Version" "registry/2.0" + Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" + RequestHeader set X-Forwarded-Proto "https" + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + # + # Registry v1 + # + + ProxyPass /v1 http://localhost:5000/v1 + ProxyPassReverse /v1 http://localhost:5000/v1 + + ProxyPass /_ping http://localhost:5000/_ping + ProxyPassReverse /_ping http://localhost:5000/_ping + + # Authentication required for push + <Location /v1> + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authenticated users + <Limit GET HEAD> + Require valid-user + </Limit> + + # Write access to docker-deployer account only + <Limit POST PUT DELETE> + Require user docker-deployer + </Limit> + + </Location> + + # Allow ping to run unauthenticated. + <Location /v1/_ping> + Satisfy any + Allow from all + </Location> + + # Allow ping to run unauthenticated. + <Location /v1/ping> + Satisfy any + Allow from all + </Location> + + # + # Registry v2 + # + + ProxyPass /v2 http://localhost:5002/v2 + ProxyPassReverse /v2 http://localhost:5002/v2 + + <Location /v2> + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authenticated users + <Limit GET HEAD> + Require valid-user + </Limit> + + # Write access to docker-deployer only + <Limit POST PUT DELETE> + Require user docker-deployer + </Limit> + + </Location> + +</VirtualHost> diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh new file mode 100644 index 00000000..d907cf5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh @@ -0,0 +1,119 @@ +#! /bin/bash +# +# Ceph cluster setup in Circle CI +# + +set -x +set -e +set -u + +NODE=$(hostname) +CEPHDIR=/tmp/ceph + +mkdir cluster +pushd cluster + +# Install +retries=0 +until [ $retries -ge 5 ]; do + pip install ceph-deploy && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy install --release hammer $NODE && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy pkg --install librados-dev $NODE && break + retries=$[$retries+1] + sleep 30 +done + +echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys +ssh-keyscan $NODE >> ~/.ssh/known_hosts +ceph-deploy new $NODE + +cat >> ceph.conf < 74acc70fa106 + Removing intermediate container edb84c2b40cb + Successfully built 74acc70fa106 + + The command outputs its progress until it completes. + +4. Start your configuration with compose. + + $ docker-compose up + Recreating compose_registryv1_1... + Recreating compose_registryv2_1... + Recreating compose_nginx_1... + Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 + ... + + +5. In another terminal, display the running configuration. + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 + 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 + aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 + +### Explore a bit + +1. Check for TLS on your `nginx` server. + + $ curl -v https://localhost:5000 + * Rebuilt URL to: https://localhost:5000/ + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1...
+ * Connected to localhost (127.0.0.1) port 5000 (#0) + * successfully set certificate verify locations: + * CAfile: none + CApath: /etc/ssl/certs + * SSLv3, TLS handshake, Client hello (1): + * SSLv3, TLS handshake, Server hello (2): + * SSLv3, TLS handshake, CERT (11): + * SSLv3, TLS alert, Server hello (2): + * SSL certificate problem: self signed certificate + * Closing connection 0 + curl: (60) SSL certificate problem: self signed certificate + More details here: http://curl.haxx.se/docs/sslcerts.html + +2. Tag the `v1` registry image. + + $ docker tag registry:latest localhost:5000/registry_one:latest + +3. Push it to the localhost. + + $ docker push localhost:5000/registry_one:latest + + If you are using the 1.6 Docker client, this pushes the image to the `v2` registry. + +4. Use `curl` to list the image in the registry. + + $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 32777 (#0) + > GET /v2/registry1/tags/list HTTP/1.1 + > User-Agent: curl/7.36.0 + > Host: localhost:32777 + > Accept: */* + > + < HTTP/1.1 200 OK + < Content-Type: application/json; charset=utf-8 + < Docker-Distribution-Api-Version: registry/2.0 + < Date: Tue, 14 Apr 2015 22:34:13 GMT + < Content-Length: 39 + < + {"name":"registry1","tags":["latest"]} + * Connection #0 to host localhost left intact + + This example refers to the specific port assigned to the 2.0 registry. You saw + this port earlier, when you used `docker ps` to show your running containers. + + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml new file mode 100644 index 00000000..5cd04858 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml @@ -0,0 +1,15 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile new file mode 100644 index 00000000..2b252ec7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile @@ -0,0 +1,6 @@ +FROM nginx:1.7 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf new file mode 100644 index 00000000..65c4d776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git
a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf new file mode 100644 index 00000000..7b039a54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf new file mode 100644 index 00000000..63cd180d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf new file mode 100644 index 00000000..47ffd237 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf @@ -0,0 +1,41 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + # auth_basic "registry.localhost"; + # auth_basic_user_file test.password; + # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile new file mode 100644 index 00000000..8cc504c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile @@ -0,0 +1,46 @@ +FROM debian:jessie + +MAINTAINER Docker Distribution Team + +# compile and runtime 
deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + # For DIND + ca-certificates \ + curl \ + iptables \ + procps \ + e2fsprogs \ + xz-utils \ + # For build + build-essential \ + file \ + git \ + net-tools \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install Docker +ENV VERSION 1.7.1 +RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ + && chmod +x /usr/local/bin/docker + +# Install DIND +RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/v1.8.1/hack/dind \ + && chmod +x /dind + +# Install bats +RUN cd /usr/local/src/ \ + && git clone https://github.com/sstephenson/bats.git \ + && cd bats \ + && ./install.sh /usr/local + +# Install docker-compose +RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ + && chmod +x /usr/local/bin/docker-compose + +RUN mkdir -p /go/src/github.com/docker/distribution +WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration + +VOLUME /var/lib/docker + +ENTRYPOINT ["/dind"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md new file mode 100644 index 00000000..e5708501 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md @@ -0,0 +1,138 @@ +# Docker Registry Integration Testing + +These integration tests cover interactions between the Docker daemon and the +registry server. All tests are run using the docker cli. + +The compose configuration is intended to set up a testing environment for Docker +using multiple registry configurations. These configurations include different +combinations of a v1 and v2 registry as well as TLS configurations. + +## Running inside of Docker +### Get integration container +The container image to run the integration tests will need to be pulled or built +locally. + +*Building locally* +``` +$ docker build -t distribution/docker-integration . +``` + +### Run script + +Invoke the tests within Docker through the `run.sh` script. + +``` +$ ./run.sh +``` + +Run with the aufs driver and a tmp volume +**NOTE: Using a volume will prevent multiple runs from needing to +re-pull images** +``` +$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh +``` + +### Example developer flow + +These tests are useful both for registry developers and Docker core +developers. The following setup may be used to run integration tests between +development versions. + +Insert into your `.zshrc` or `.bashrc` + +``` +# /usr/lib/docker for Docker-in-Docker +# Set this directory to make each invocation run much faster, without +# the need to repull images. +export DOCKER_VOLUME=$HOME/.docker-test-volume + +# Use overlay for all Docker testing, try aufs if overlay not supported +export DOCKER_GRAPHDRIVER=overlay + +# Name this according to personal preference +function rdtest() { + if [ "$1" != "" ]; then + DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker + if [ !
-f $DOCKER_BINARY ]; then + current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` + echo "$DOCKER_BINARY does not exist" + echo "Current checked out docker version: $current_version" + echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" + return 1 + fi + fi + + $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh +} +``` + +Run with a Docker release version +``` +$ rdtest +``` + +Run using a local development version of docker +``` +$ cd $GOPATH/src/github.com/docker/docker +$ make binary +$ rdtest `cat VERSION` +``` + +## Running manually outside of Docker + +### Install Docker Compose + +[Docker Compose Installation Guide](https://docs.docker.com/compose/install/) + +### Start compose setup +``` +docker-compose up +``` + +### Install Certificates +The certificates must be installed in /etc/docker/certs.d in order to use TLS +client auth and use the CA certificate. +``` +sudo sh ./install_certs.sh +``` + +### Test with Docker +Tag an image as with any other private registry. Attempt to push the image. + +``` +docker pull hello-world +docker tag hello-world localhost:5440/hello-world +docker push localhost:5440/hello-world + +docker tag hello-world localhost:5441/hello-world +docker push localhost:5441/hello-world +# Perform login using user `testuser` and password `passpassword` +``` + +### Set /etc/hosts entry +Find the non-localhost IP address of the local machine and map it to the +`localregistry` hostname in /etc/hosts. + +### Run bats +Run the bats tests after updating /etc/hosts, installing the certificates, and +running the `docker-compose` script. +``` +bats -p . +``` + +## Configurations + +Port | V2 | V1 | TLS | Authentication +--- | --- | --- | --- | --- +5000 | yes | yes | no | none +5001 | no | yes | no | none +5002 | yes | no | no | none +5011 | no | yes | yes | none +5440 | yes | yes | yes | none +5441 | yes | yes | yes | basic (testuser/passpassword) +5442 | yes | yes | yes | TLS client +5443 | yes | yes | yes | TLS client (no CA) +5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) +5445 | yes | yes | yes (no CA) | none +5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) +5447 | yes | yes | yes (no CA) | TLS client +5448 | yes | yes | yes (SSLv3) | none diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml new file mode 100644 index 00000000..d664c7bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -0,0 +1,27 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + - "5001:5001" + - "5002:5002" + - "5011:5011" + - "5440:5440" + - "5441:5441" + - "5442:5442" + - "5443:5443" + - "5444:5444" + - "5445:5445" + - "5446:5446" + - "5447:5447" + - "5448:5448" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry:0.9.1 + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash new file mode 100644 index 00000000..60d96ae0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash @@ -0,0 +1,21 @@ +# Start docker daemon +function start_daemon() { + # Drivers to use for Docker engines the tests are going to create.
+ STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} + EXEC_DRIVER=${EXEC_DRIVER:-native} + + docker --daemon --log-level=panic \ + --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & + DOCKER_PID=$! + + # Wait for it to become reachable. + tries=10 + until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh new file mode 100644 index 00000000..c1fa2b20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh @@ -0,0 +1,38 @@ +#!/bin/sh +set -e + +hostname=$1 +if [ "$hostname" = "" ]; then + hostname="localhost" +fi + +mkdir -p /etc/docker/certs.d/$hostname:5011 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5440 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5441 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5442 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5443 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt +cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert +cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5444 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5447 +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5448 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 00000000..04515e8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf 
b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 100644 index 00000000..65c4d776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf new file mode 100644 index 00000000..5b1a2d58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 00000000..63cd180d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf new file mode 100644 index 00000000..3c629ae8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,13 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} +location / { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 00000000..883a2d48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 
+1,8 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} +location / { + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 00000000..b402eacb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,277 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V1 Only) +server { + listen 5001; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V2 Only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS localhost (V1 Only) +server { + listen 5011; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + +# TLS localregistry (V1 Only) +server { + listen 5011; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + + + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + tls auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate 
/etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for localregistry server_name +# Requires configuring /etc/hosts to use +# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing +# Docker secure/insecure registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + 
ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 00000000..4e55de81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh new file mode 100644 index 00000000..c8831a09 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +set -e +set -x + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +source helpers.bash + +# Root directory of Distribution +DISTRIBUTION_ROOT=$(cd ../..; pwd -P) + +volumeMount="" +if [ "$DOCKER_VOLUME" != "" ]; then + volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" +fi + +dockerMount="" +if [ "$DOCKER_BINARY" != "" ]; then + dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" +else + DOCKER_BINARY=docker +fi + +# Image containing the integration tests environment. +INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} + +if [ "$1" == "-d" ]; then + start_daemon + shift +fi + +TESTS=${@:-.} + +# Make sure we upgrade the integration environment. +docker pull $INTEGRATION_IMAGE + +# Start a Docker engine inside a docker container +ID=$(docker run -d -it --privileged $volumeMount $dockerMount \ + -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ + -e "DOCKER_GRAPHDRIVER=$DOCKER_GRAPHDRIVER" \ + ${INTEGRATION_IMAGE} \ + ./run_engine.sh) + +# Stop container on exit +trap "docker rm -f -v $ID" EXIT + + +# Wait for it to become reachable. +tries=10 +until docker exec "$ID" docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 +done + +# If no volume is specified, transfer images into the container from +# the outer docker instance +if [ "$DOCKER_VOLUME" == "" ]; then + # Make sure we have images outside the container, to transfer to the container. + # Not much will happen here if the images are already present. + docker-compose pull + docker-compose build + + # Transfer images to the inner container. 
+ for image in "$INTEGRATION_IMAGE" registry:0.9.1 dockerintegration_nginx dockerintegration_registryv2; do + docker save "$image" | docker exec -i "$ID" docker load + done +fi + +# Run the tests. +docker exec -it "$ID" sh -c "./test_runner.sh $TESTS" + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh new file mode 100644 index 00000000..d14384cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh @@ -0,0 +1,23 @@ +#!/bin/sh +set -e +set -x + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-overlay} +EXEC_DRIVER=${EXEC_DRIVER:-native} + +# Set IP address in /etc/hosts for localregistry +IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') +echo "$IP localregistry" >> /etc/hosts + +sh install_certs.sh localregistry + +DOCKER_VERSION=$(docker --version | cut -d ' ' -f3 | cut -d ',' -f1) +major=$(echo "$DOCKER_VERSION"| cut -d '.' -f1) +minor=$(echo "$DOCKER_VERSION"| cut -d '.' -f2) + +daemonOpts="daemon" +if [ $major -le 1 ] && [ $minor -lt 9 ]; then + daemonOpts="--daemon" +fi + +docker $daemonOpts --log-level=debug --storage-driver="$DOCKER_GRAPHDRIVER" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh new file mode 100644 index 00000000..73a70c7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Run the integration tests with multiple versions of the Docker engine + +set -e +set -x + +source helpers.bash + +if [ `uname` = "Linux" ]; then + tmpdir_template="$TMPDIR/docker-versions.XXXXX" +else + # /tmp isn't available for mounting in boot2docker + tmpdir_template="`pwd`/../../../docker-versions.XXXXX" +fi + +tmpdir=`mktemp -d "$tmpdir_template"` +trap "rm -rf $tmpdir" EXIT + +if [ "$1" == "-d" ]; then + start_daemon +fi + +# Released versions + +versions="1.6.1 1.7.1 1.8.3 1.9.1" + +for v in $versions; do + echo "Extracting Docker $v from dind image" + binpath="$tmpdir/docker-$v/docker" + ID=$(docker create dockerswarm/dind:$v) + docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" + + echo "Running tests with Docker $v" + DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" ./run.sh + + # Cleanup. + docker rm -f "$ID" +done + +# Latest experimental version + +echo "Extracting Docker master from dind image" +binpath="$tmpdir/docker-master/docker" +docker pull dockerswarm/dind-master +ID=$(docker create dockerswarm/dind-master) +docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" + +echo "Running tests with Docker master" +DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" ./run.sh + +# Cleanup. 
+docker rm -f "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh new file mode 100644 index 00000000..0a628238 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +TESTS=${@:-.} + +function execute() { + >&2 echo "++ $@" + eval "$@" +} + +execute time docker-compose build + +execute docker-compose up -d + +# Run the tests. +execute time bats -p $TESTS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats new file mode 100644 index 00000000..8b7ae287 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats @@ -0,0 +1,102 @@ +# Registry host name, should be set to non-localhost address and match +# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d +hostname="localregistry" + +image="hello-world:latest" + +# Login information, should match values in nginx/test.passwd +user="testuser" +password="passpassword" +email="distribution@docker.com" + +function setup() { + docker pull $image +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +# has_digest enforces the last output line is "Digest: sha256:..." 
+# the input is the output of the command to check (e.g. bats' "$output")
+function has_digest() {
+	filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p')
+	[ "$filtered" != "" ]
+}
+
+function login() {
+	run docker login -u $user -p $password -e $email $1
+	[ "$status" -eq 0 ]
+	# First line is WARNING about credential save
+	[ "${lines[1]}" = "Login Succeeded" ]
+}
+
+@test "Test valid certificates" {
+	docker tag -f $image $hostname:5440/$image
+	run docker push $hostname:5440/$image
+	[ "$status" -eq 0 ]
+	has_digest "$output"
+}
+
+@test "Test basic auth" {
+	basic_auth_version_check
+	login $hostname:5441
+	docker tag -f $image $hostname:5441/$image
+	run docker push $hostname:5441/$image
+	[ "$status" -eq 0 ]
+	has_digest "$output"
+}
+
+@test "Test TLS client auth" {
+	docker tag -f $image $hostname:5442/$image
+	run docker push $hostname:5442/$image
+	[ "$status" -eq 0 ]
+	has_digest "$output"
+}
+
+@test "Test TLS client with invalid certificate authority fails" {
+	docker tag -f $image $hostname:5443/$image
+	run docker push $hostname:5443/$image
+	[ "$status" -ne 0 ]
+}
+
+@test "Test basic auth with TLS client auth" {
+	basic_auth_version_check
+	login $hostname:5444
+	docker tag -f $image $hostname:5444/$image
+	run docker push $hostname:5444/$image
+	[ "$status" -eq 0 ]
+	has_digest "$output"
+}
+
+@test "Test unknown certificate authority fails" {
+	docker tag -f $image $hostname:5445/$image
+	run docker push $hostname:5445/$image
+	[ "$status" -ne 0 ]
+}
+
+@test "Test basic auth with unknown certificate authority fails" {
+	run login $hostname:5446
+	[ "$status" -ne 0 ]
+	docker tag -f $image $hostname:5446/$image
+	run docker push $hostname:5446/$image
+	[ "$status" -ne 0 ]
+}
+
+@test "Test TLS client auth to server with unknown certificate authority fails" {
+	docker tag -f $image $hostname:5447/$image
+	run docker push $hostname:5447/$image
+	[ "$status" -ne 0 ]
+}
+
+@test "Test failure to connect to server fails to fall back to SSLv3" {
+	docker tag -f $image $hostname:5448/$image
+	run docker push $hostname:5448/$image
+	[ "$status" -ne 0 ]
+}
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go
new file mode 100644
index 00000000..31d821bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go
@@ -0,0 +1,139 @@
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+const (
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm.
Strings of type Digest have some guarantee of being in
+// the correct format, and the type provides quick access to the components of
+// a digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows one to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat is returned when the digest format is invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength is returned when the digest has an invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+	d := Digest(s)
+
+	return d, d.Validate()
+}
+
+// FromReader returns the digest of the underlying content using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+
+	if !DigestRegexpAnchored.MatchString(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	i := strings.Index(s, ":")
+	if i < 0 {
+		return ErrDigestInvalidFormat
+	}
+
+	// case: "sha256:" with no hex.
+	if i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	switch algorithm := Algorithm(s[:i]); algorithm {
+	case SHA256, SHA384, SHA512:
+		if algorithm.Size()*2 != len(s[i+1:]) {
+			return ErrDigestInvalidLength
+		}
+		break
+	default:
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
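+//
+// For example, a short usage sketch using this package's own constant:
+//
+// 	Digest(DigestSha256EmptyTar).Hex() // "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"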
+func (d Digest) Hex() string {
+	return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+	return string(d)
+}
+
+func (d Digest) sepIndex() int {
+	i := strings.Index(string(d), ":")
+
+	if i < 0 {
+		panic("could not find ':' in digest: " + d)
+	}
+
+	return i
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 00000000..f3105a45
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will panic.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	return nil
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// is not available, New will panic; this can be checked by calling Available
+// before calling New.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.New()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.New()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+	Hash() hash.Hash // provides direct access to underlying hash instance.
+	Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 00000000..f64b0db3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,42 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// 	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2.
Suspend and resume of ongoing digest calculations to support efficient
+// digest verification in the registry.
+//
+package digest
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go
new file mode 100644
index 00000000..3fac41b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,245 @@
+package digest
+
+import (
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	mutex   sync.RWMutex
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found, ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found,
+// ErrDigestAmbiguous will be returned with an empty digest value.
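+//
+// A brief usage sketch (hypothetical values, assuming the digest below was
+// previously added with Add):
+//
+// 	s := NewSet()
+// 	_ = s.Add(Digest("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"))
+// 	dgst, err := s.Lookup("7173b8")        // a short hex prefix resolves to the full digest
+// 	dgst, err = s.Lookup("sha256:7173b8")  // an algorithm-qualified short form also matches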
+func (dst *Set) Lookup(d string) (Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg Algorithm + hex string + ) + dgst, err := ParseDigest(d) + if err == ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. 
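+//
+// For example (hypothetical contents): with digests whose hex portions begin
+// "7173b8" and "7173f1", a requested length of 4 yields the five-character
+// codes "7173b" and "7173f", the shortest prefixes that still distinguish the
+// two entries, while an unrelated "e3b0c4..." digest keeps the minimum "e3b0".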
+func ShortCodeTable(dst *Set, length int) map[Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 00000000..9af3be13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,44 @@ +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. +func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/doc.go new file mode 100644 index 00000000..bdd8cb70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. 
+package distribution diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile new file mode 100644 index 00000000..89ee3d29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile @@ -0,0 +1,17 @@ +FROM docs/base:latest +MAINTAINER Mary Anthony (@moxiegirl) + +RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine +RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose +RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine +RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry +RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic +RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials +RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource + +ENV PROJECT=registry + +# To get the git info for this repo +COPY . /src + +COPY . /docs/content/$PROJECT/ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile new file mode 100644 index 00000000..021e8f6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile @@ -0,0 +1,55 @@ +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate + +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_GRAPHDRIVER \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +# Get the IP ADDRESS +DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") +HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") +HUGO_BIND_IP=0.0.0.0 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) + + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) + +default: docs + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + +docs-draft: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(if 
$(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
+
+
+docs-build:
+#	( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
+#	echo "$(GIT_BRANCH)" > GIT_BRANCH
+#	echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
+#	echo "$(GITCOMMIT)" > GITCOMMIT
+	docker build -t "$(DOCKER_DOCS_IMAGE)" .
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md
new file mode 100644
index 00000000..c3d3afd4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md
@@ -0,0 +1,213 @@
+
+
+# Authenticating proxy with apache
+
+## Use-case
+
+People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.
+
+Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal http portal.
+
+### Alternatives
+
+If you just want authentication for your registry, and are happy maintaining user access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth).
+
+### Solution
+
+With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.
+
+While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the example.
+
+We also implement push restriction (to a limited user group) for the sake of the example. Again, you should adapt this to your own needs.
+
+### Gotchas
+
+While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.
+
+Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues.
+
+## Setting things up
+
+Read again [the requirements](recipes.md#requirements).
+
+Ready?
+
+Run the following script:
+
+```
+mkdir -p auth
+mkdir -p data
+
+# This is the main apache configuration you will use
+cat <<EOF > auth/httpd.conf
+LoadModule headers_module modules/mod_headers.so
+
+LoadModule authn_file_module modules/mod_authn_file.so
+LoadModule authn_core_module modules/mod_authn_core.so
+LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
+LoadModule authz_user_module modules/mod_authz_user.so
+LoadModule authz_core_module modules/mod_authz_core.so
+LoadModule auth_basic_module modules/mod_auth_basic.so
+LoadModule access_compat_module modules/mod_access_compat.so
+
+LoadModule log_config_module modules/mod_log_config.so
+
+LoadModule ssl_module modules/mod_ssl.so
+
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+
+LoadModule unixd_module modules/mod_unixd.so
+
+<IfModule ssl_module>
+  SSLRandomSeed startup builtin
+  SSLRandomSeed connect builtin
+</IfModule>
+
+<IfModule unixd_module>
+  User daemon
+  Group daemon
+</IfModule>
+
+ServerAdmin you@example.com
+
+ErrorLog /proc/self/fd/2
+
+LogLevel warn
+
+<IfModule log_config_module>
+  LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+  LogFormat "%h %l %u %t \"%r\" %>s %b" common
+
+  <IfModule logio_module>
+    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
+  </IfModule>
+
+  CustomLog /proc/self/fd/1 common
+</IfModule>
+
+ServerRoot "/usr/local/apache2"
+
+Listen 5043
+
+<Directory />
+  AllowOverride none
+  Require all denied
+</Directory>
+
+<VirtualHost *:5043>
+
+  ServerName myregistrydomain.com
+
+  SSLEngine on
+  SSLCertificateFile /usr/local/apache2/conf/domain.crt
+  SSLCertificateKeyFile /usr/local/apache2/conf/domain.key
+
+  ## SSL settings recommendation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html
+  # Anti CRIME
+  SSLCompression off
+
+  # POODLE and other stuff
+  SSLProtocol all -SSLv2 -SSLv3 -TLSv1
+
+  # Secure cipher suites
+  SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
+  SSLHonorCipherOrder on
+
+  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
+  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
+  RequestHeader set X-Forwarded-Proto "https"
+
+  ProxyRequests     off
+  ProxyPreserveHost on
+
+  # no proxy for /error/ (Apache HTTPd error messages)
+  ProxyPass /error/ !
+
+  ProxyPass        /v2 http://registry:5000/v2
+  ProxyPassReverse /v2 http://registry:5000/v2
+
+  <Location /v2>
+    Order deny,allow
+    Allow from all
+    AuthName "Registry Authentication"
+    AuthType basic
+    AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd"
+    AuthGroupFile "/usr/local/apache2/conf/httpd.groups"
+
+    # Read access to authenticated users
+    <Limit GET HEAD>
+      Require valid-user
+    </Limit>
+
+    # Write access to docker-deployer only
+    <Limit POST PUT DELETE PATCH>
+      Require group pusher
+    </Limit>
+
+  </Location>
+
+</VirtualHost>
+EOF
+
+# Now, create a password file for "testuser" and "testpassword"
+docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd
+# Create another one for "testuserpush" and "testpasswordpush"
+docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd
+
+# Create your group file
+echo "pusher: testuserpush" > auth/httpd.groups
+
+# Copy over your certificate files
+cp domain.crt auth
+cp domain.key auth
+
+# Now create your compose file
+
+cat <<EOF > docker-compose.yml
+apache:
+  image: "httpd:2.4"
+  hostname: myregistrydomain.com
+  ports:
+    - 5043:5043
+  links:
+    - registry:registry
+  volumes:
+    - `pwd`/auth:/usr/local/apache2/conf
+
+registry:
+  image: registry:2
+  ports:
+    - 127.0.0.1:5000:5000
+  volumes:
+    - `pwd`/data:/var/lib/registry
+
+EOF
+```
+
+## Starting and stopping
+
+Now, start your stack:
+
+    docker-compose up -d
+
+Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image:
+
+    docker login myregistrydomain.com:5043
+    docker tag ubuntu myregistrydomain.com:5043/test
+    docker push myregistrydomain.com:5043/test
+
+Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image:
+
+    docker login myregistrydomain.com:5043
+    docker pull myregistrydomain.com:5043/test
+
+Verify that the "pull-only" user can NOT push:
+
+    docker push myregistrydomain.com:5043/test
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md
new file mode 100644
index 00000000..39251760
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md
@@ -0,0 +1,54 @@
+
+
+# Architecture
+
+## Design
+**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.
+
+### Eventual Consistency
+
+> **NOTE:** This section belongs somewhere, perhaps in a design document. We
+> are leaving this here so the information is not lost.
+
+Running the registry on eventually consistent backends has been part of the
+design from the beginning. This section covers some of the approaches to
+dealing with this reality.
+
+There are a few classes of issues that we need to worry about when
+implementing something on top of the storage drivers:
+
+1. Read-After-Write consistency (see this [article on
+   s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
+2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).
+
+In reality, the registry must worry about these kinds of errors when doing the
+following:
+
+1. Accepting data into a temporary upload file may not have the latest data block
+   yet (read-after-write).
+2. Moving uploaded data into its blob location (write-write race).
+3. Modifying the "current" manifest for a given tag (write-write race).
+4. A whole slew of operations around deletes (read-after-write, delete-write
+   races, garbage collection, etc.).
+ +The backend path layout employs a few techniques to avoid these problems: + +1. Large writes are done to private upload directories. This alleviates most + of the corruption potential under multiple writers by avoiding multiple + writers. +2. Constraints in storage driver implementations, such as support for writing + after the end of a file to extend it. +3. Digest verification to avoid data corruption. +4. Manifest files are stored by digest and cannot change. +5. All other non-content files (links, hashes, etc.) are written as an atomic + unit. Anything that requires additions and deletions is broken out into + separate "files". Last writer still wins. + +Unfortunately, one must play this game when trying to build something like +this on top of eventually consistent storage systems. If we run into serious +problems, we can wrap the storagedrivers in a shared consistency layer but +that would increase complexity and hinder registry cluster performance. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md new file mode 100644 index 00000000..5672210b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md @@ -0,0 +1,158 @@ + + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. 
+ +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ GOPATH=`godep path`:$GOPATH make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/Sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Developing + +The above approaches are helpful for small experimentation. If more complex +tasks are at hand, it is recommended to employ the full power of `godep`. + +The Makefile is designed to have its `GOPATH` defined externally. This allows +one to experiment with various development environment setups. This is +primarily useful when testing upstream bugfixes, by modifying local code. This +can be demonstrated using `godep` to migrate the `GOPATH` to use the specified +dependencies. The `GOPATH` can be migrated to the current package versions +declared in `Godeps` with the following command: + + godep restore + +> **WARNING:** This command will checkout versions of the code specified in +> Godeps/Godeps.json, modifying the contents of `GOPATH`. 
If this is +> undesired, it is recommended to create a workspace devoted to work on the +> _Distribution_ project. + +With a successful run of the above command, one can now use `make` without +specifying the `GOPATH`: + + make + +If that is successful, standard `go` commands, such as `go test` should work, +per package, without issue. + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. + +To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) +(librados-dev and librbd-dev will be required to build the bindings): + + export DOCKER_BUILDTAGS='include_rados' diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md new file mode 100644 index 00000000..7cdfdf40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md @@ -0,0 +1,2188 @@ + + +# Registry Configuration Reference + +The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. + +## Override specific configuration options + +In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. + +To override a configuration option, create an environment variable named +`REGISTRY_variable` where *`variable`* is the name of the configuration option +and the `_` (underscore) represents indention levels. For example, you can +configure the `rootdirectory` of the `filesystem` storage backend: + + storage: + filesystem: + rootdirectory: /var/lib/registry + +To override this value, set an environment variable like this: + + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere + +This variable overrides the `/var/lib/registry` value to the `/somewhere` +directory. + +>**NOTE**: It is highly recommended to create a base configuration file with which environment variables can be used to tweak individual values. Overriding configuration sections with environment variables is not recommended. + +## Overriding the entire configuration file + +If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. + +Typically, create a new configuration file from scratch, and call it `config.yml`, then: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/config.yml:/etc/docker/registry/config.yml \ + registry:2 + +You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). + +## List of configuration options + +This section lists all the registry configuration options. Some options in +the list are mutually exclusive. So, make sure to read the detailed reference +information about each option that appears later in this page. 
+ + version: 0.1 + log: + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com + loglevel: debug # deprecated: use "log" + storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + bucket: bucketname + encrypt: true + secure: true + v4auth: true + chunksize: 5242880 + rootdirectory: /s3/object/name/prefix + rados: + poolname: radospool + username: radosuser + chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + delete: + enabled: false + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000 + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true + http: + addr: localhost:5000 + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + +In some instances a configuration 
option is **optional** but it contains child
+options marked as **required**. This indicates that you can omit the parent with
+all its children. However, if the parent is included, you must also include all
+the children marked **required**.
+
+## version
+
+    version: 0.1
+
+The `version` option is **required**. It specifies the configuration's version.
+It is expected to remain a top-level field, to allow for a consistent version
+check before parsing the remainder of the configuration file.
+
+## log
+
+The `log` subsection configures the behavior of the logging system. The logging
+system outputs everything to stdout. You can adjust the granularity and format
+with this configuration section.
+
+    log:
+      level: debug
+      formatter: text
+      fields:
+        service: registry
+        environment: staging
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
+| `formatter` | no | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` or `logstash`. The default is `text`. |
+| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they are mixed into other systems. |
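+
+The registry's logger is logrus, so the configured `fields` simply become
+structured keys attached to every entry. The following is a minimal,
+illustrative Go sketch of that behavior, not registry code; the field values
+are assumptions for the example:
+
+    package main
+
+    import "github.com/Sirupsen/logrus"
+
+    func main() {
+        // Mirrors a config of fields: {service: registry, environment: staging}.
+        // Every line emitted through this entry carries both keys, which makes
+        // the source easy to identify once logs are mixed with other systems'.
+        log := logrus.WithFields(logrus.Fields{
+            "service":     "registry",
+            "environment": "staging",
+        })
+
+        // With the default text formatter this prints something like:
+        //   time="..." level=info msg="listening on :5000" environment=staging service=registry
+        log.Info("listening on :5000")
+    }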
+ +## hooks + + hooks: + - type: mail + levels: + - panic + options: + smtp: + addr: smtp.sendhost.com:25 + username: sendername + password: password + insecure: true + from: name@sendhost.com + to: + - name@receivehost.com + +The `hooks` subsection configures the logging hooks' behavior. This subsection +includes a sequence handler which you can use for sending mail, for example. +Refer to `loglevel` to configure the level of messages printed. + +## loglevel + +> **DEPRECATED:** Please use [log](#log) instead. + + loglevel: debug + +Permitted values are `error`, `warn`, `info` and `debug`. The default is +`info`. + +## storage + + storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + bucket: bucketname + encrypt: true + secure: true + v4auth: true + chunksize: 5242880 + rootdirectory: /s3/object/name/prefix + rados: + poolname: radospool + username: radosuser + chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + delete: + enabled: false + cache: + blobdescriptor: inmemory + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + redirect: + disable: false + +The storage option is **required** and defines which storage backend is in use. +You must configure one backend; if you configure more, the registry returns an error. + +If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use a S3, or Azure, backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message: + + mkdir /XXX protocol error and your registry will not function properly. + +### Maintenance + +Currently upload purging and read-only mode are the only maintenance functions available. +These and future maintenance functions which are related to storage can be configured under +the maintenance section. + +### Upload Purging + +Upload purging is a background process that periodically removes orphaned files from the upload +directories of the registry. Upload purging is enabled by default. To +configure upload directory purging, the following parameters +must be set. + + +| Parameter | Required | Description + --------- | -------- | ----------- +`enabled` | yes | Set to true to enable upload purging. Default=true. | +`age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week) +`interval` | yes | The interval between upload directory purging. Default=24h. +`dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. + +Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). 
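+
+That note mirrors Go's `time.ParseDuration` syntax, so candidate values can be
+sanity-checked with the standard library before they go into the configuration.
+A small sketch (illustrative values only, not part of the registry):
+
+    package main
+
+    import (
+        "fmt"
+        "time"
+    )
+
+    func main() {
+        // Each maintenance duration is a number with an optional fraction and
+        // a unit suffix, e.g. "45m", "2h10m", "168h" (one week); "1 week" is
+        // included below to show an invalid value being rejected.
+        for _, v := range []string{"45m", "2h10m", "168h", "1 week"} {
+            d, err := time.ParseDuration(v)
+            if err != nil {
+                fmt.Printf("%q is not a valid age/interval: %v\n", v, err)
+                continue
+            }
+            fmt.Printf("%q parses to %v\n", v, d)
+        }
+    }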
+ +### Read-only mode + +If the `readonly` section under `maintenance` has `enabled` set to `true`, +clients will not be allowed to write to the registry. This mode is useful to +temporarily prevent writes to the backend storage so a garbage collection pass +can be run. Before running garbage collection, the registry should be +restarted with readonly's `enabled` set to true. After the garbage collection +pass finishes, the registry may be restarted again, this time with `readonly` +removed from the configuration (or set to false). + +### delete + +Use the `delete` subsection to enable the deletion of image blobs and manifests +by digest. It defaults to false, but it can be enabled by writing the following +on the configuration file: + + delete: + enabled: true + +### cache + +Use the `cache` subsection to enable caching of data accessed in the storage +backend. Currently, the only available cache provides fast access to layer +metadata. This, if configured, uses the `blobdescriptor` field. + +You can set `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses +a Redis pool to cache layer metadata. The `inmemory` value uses an in memory +map. + +>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these +>are equivalent, `layerinfo` has been deprecated, in favor or +>`blobdescriptor`. + +### redirect + +The `redirect` subsection provides configuration for managing redirects from +content backends. For backends that support it, redirecting is enabled by +default. Certain deployment scenarios may prefer to route all data through the +Registry, rather than redirecting to the backend. This may be more efficient +when using a backend that is not colocated or when a registry instance is +doing aggressive caching. + +Redirects can be disabled by adding a single flag `disable`, set to `true` +under the `redirect` section: + + redirect: + disable: true + +### filesystem + +The `filesystem` storage backend uses the local disk to store registry files. It +is ideal for development and may be appropriate for some small-scale production +applications. + +This backend has a single, required `rootdirectory` parameter. The parameter +specifies the absolute path to a directory. The registry stores all its data +here so make sure there is adequate space available. + +### azure + +This storage backend uses Microsoft's Azure Blob Storage. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `accountname` | yes | Azure account name. |
| `accountkey` | yes | Azure account key. |
| `container` | yes | Name of the Azure container into which to store data. |
| `realm` | no | Domain name suffix for the Storage Service API endpoint. By default, this is `core.windows.net`. |
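For illustration, a minimal `storage` section using this backend might look
like the following sketch (the account values are placeholders):

    storage:
      azure:
        accountname: myregistryaccount
        accountkey: <base64-encoded-account-key>
        container: registry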
### gcs

This storage backend uses Google Cloud Storage.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `bucket` | yes | Storage bucket name. |
| `keyfile` | no | A private service account key file in JSON format. Google Application Default Credentials can be used instead of a key file. |
| `rootdirectory` | no | A prefix applied to all Google Cloud Storage keys, which allows you to segment data in your bucket if necessary. |
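As an illustrative sketch, a `gcs` configuration relying on Application
Default Credentials (so no `keyfile`) might look like this (the bucket name
is a placeholder):

    storage:
      gcs:
        bucket: my-registry-bucket
        rootdirectory: /registry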
### rados

This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/).
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `poolname` | yes | Ceph pool name. |
| `username` | no | Ceph cluster user to connect as (i.e. `admin`, not `client.admin`). |
| `chunksize` | no | Size of the written RADOS objects. Default value is 4MB (4194304). |
### s3

This storage backend uses Amazon's Simple Storage Service (S3).
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `accesskey` | yes | Your AWS Access Key. |
| `secretkey` | yes | Your AWS Secret Key. |
| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS-based bucket routing. |
| `bucket` | yes | The bucket name in which you want to store the registry's data. |
| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `true`. |
| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number larger than `5*1024*1024`. |
| `rootdirectory` | no | A prefix applied to all S3 keys, which allows you to segment data in your bucket if necessary. |
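A hypothetical `s3` section following the recommendations above (`v4auth`
enabled, `chunksize` at the 5MB minimum) might look like this sketch, with
placeholder credentials:

    storage:
      s3:
        accesskey: <aws-access-key>
        secretkey: <aws-secret-key>
        region: us-east-1
        bucket: my-registry-bucket
        v4auth: true
        chunksize: 5242880   # 5*1024*1024, the minimum the S3 API allows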
### Openstack Swift

This storage backend uses Openstack Swift object storage.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `authurl` | yes | URL for obtaining an auth token: `https://storage.myprovider.com/v2.0` or `https://storage.myprovider.com/v3/auth`. |
| `username` | yes | Your Openstack user name. |
| `password` | yes | Your Openstack password. |
| `region` | no | The Openstack region in which your container exists. |
| `container` | yes | The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. |
| `tenant` | no | Your Openstack tenant name. You can use either `tenant` or `tenantid`. |
| `tenantid` | no | Your Openstack tenant id. You can use either `tenant` or `tenantid`. |
| `domain` | no | Your Openstack domain name for the Identity v3 API. You can use either `domain` or `domainid`. |
| `domainid` | no | Your Openstack domain id for the Identity v3 API. You can use either `domain` or `domainid`. |
| `trustid` | no | Your Openstack trust id for the Identity v3 API. |
| `insecureskipverify` | no | Set to `true` to skip TLS verification; `false` by default. |
| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
| `prefix` | no | A prefix applied to all Swift keys, which allows you to segment data in your container if necessary. Defaults to the empty string, which is the container's root. |
| `secretkey` | no | The secret key used to generate temporary URLs. |
| `accesskey` | no | The access key used to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. |
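For example, a sketch of a `swift` section against an Identity v2 endpoint
(the credentials and names are placeholders):

    storage:
      swift:
        username: <openstack-user>
        password: <openstack-password>
        authurl: https://storage.myprovider.com/v2.0
        tenant: <tenant-name>
        container: registry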
## auth

    auth:
      silly:
        realm: silly-realm
        service: silly-service
      token:
        realm: token-realm
        service: token-service
        issuer: registry-token-issuer
        rootcertbundle: /root/certs/bundle
      htpasswd:
        realm: basic-realm
        path: /path/to/htpasswd

The `auth` option is **optional**. There are currently three possible auth
providers: `silly`, `token` and `htpasswd`. You can configure only one `auth`
provider.

### silly

The `silly` auth is only for development purposes. It simply checks for the
existence of the `Authorization` header in the HTTP request, with no regard
for the header's value. If the header does not exist, the `silly` auth
responds with a challenge response, echoing back the realm, service, and
scope for which access was denied.

The following values are used to configure the response:
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
### token

Token-based authentication allows the authentication system to be decoupled
from the registry. It is a well-established authentication paradigm with a
high degree of security.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token, so it must match the value configured for the issuer. |
| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
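Putting these parameters together, a token configuration might look like the
following sketch; the realm URL, service name, issuer, and certificate path
are placeholders for your own token service:

    auth:
      token:
        realm: https://auth.example.com/token
        service: my-registry
        issuer: my-token-issuer
        rootcertbundle: /etc/registry/auth-cert-bundle.pem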
For more information about token-based authentication configuration, see the
[specification](spec/auth/token.md).

### htpasswd

The _htpasswd_ authentication backend allows you to configure basic
authentication using an
[Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are
supported; entries with other hash types are ignored. The htpasswd file is
loaded once, at startup. If the file is invalid, the registry will display an
error and will not start.

> __WARNING:__ This authentication scheme should only be used with TLS
> configured, since basic authentication sends passwords as part of the HTTP
> header.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `path` | yes | Path to the htpasswd file to load at startup. |
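Since only `bcrypt` entries are honored, you can create a suitable file with
Apache's `htpasswd` tool using the `-B` flag (a sketch; the user name and
password are examples):

    htpasswd -Bbn testuser testpassword > /path/to/htpasswd

The matching registry configuration would then be:

    auth:
      htpasswd:
        realm: basic-realm
        path: /path/to/htpasswd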
## middleware

The `middleware` option is **optional**. Use this option to inject middleware
at named hook points. All middleware must implement the same interface as the
object it wraps: registry middleware must implement the
`distribution.Namespace` interface, repository middleware must implement
`distribution.Repository`, and storage middleware must implement
`driver.StorageDriver`.

Currently only one middleware, `cloudfront`, a storage middleware, is
supported in the registry implementation.

    middleware:
      registry:
        - name: ARegistryMiddleware
          options:
            foo: bar
      repository:
        - name: ARepositoryMiddleware
          options:
            foo: bar
      storage:
        - name: cloudfront
          options:
            baseurl: https://my.cloudfronted.domain.com/
            privatekey: /path/to/pem
            keypairid: cloudfrontkeypairid
            duration: 3000

Each middleware entry has `name` and `options` entries. The `name` must
correspond to the name under which the middleware registers itself. The
`options` field is a map of the custom configuration required to initialize
the middleware. It is treated as a `map[string]interface{}`, so it supports
any structure the middleware's initialization function knows how to
interpret.

### cloudfront
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes | Private key for Cloudfront, provided by AWS. |
| `keypairid` | yes | Key pair ID provided by AWS. |
| `duration` | no | Duration for which a signed URL should be valid. |
## reporting

    reporting:
      bugsnag:
        apikey: bugsnagapikey
        releasestage: bugsnagreleasestage
        endpoint: bugsnagendpoint
      newrelic:
        licensekey: newreliclicensekey
        name: newrelicname
        verbose: true

The `reporting` option is **optional** and configures error and metrics
reporting tools. At the moment only two services are supported, [New
Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid
configuration may contain both.

### bugsnag
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `apikey` | yes | API key provided by Bugsnag. |
| `releasestage` | no | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
| `endpoint` | no | The enterprise Bugsnag endpoint. |
### newrelic
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `licensekey` | yes | License key provided by New Relic. |
| `name` | no | New Relic application name. |
| `verbose` | no | Enable New Relic debugging output on stdout. |
## http

    http:
      addr: localhost:5000
      net: tcp
      prefix: /my/nested/registry/
      host: https://myregistryaddress.org:5000
      secret: asecretforlocaldevelopment
      tls:
        certificate: /path/to/x509/public
        key: /path/to/x509/private
        clientcas:
          - /path/to/ca.pem
          - /path/to/another/ca.pem
      debug:
        addr: localhost:5001
      headers:
        X-Content-Type-Options: [nosniff]

The `http` option details the configuration for the HTTP server that hosts
the registry.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `addr` | yes | The address on which the server should accept connections. The form depends on the network type (see the `net` option): `HOST:PORT` for `tcp` and `FILE` for a unix socket. |
| `net` | no | The network used to create the listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
| `prefix` | no | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `host` | no | An externally-reachable address for the registry, as a fully qualified URL. If present, it is used when creating generated URLs; otherwise, these URLs are derived from client requests. |
| `secret` | yes | A random piece of data used to sign state that may be stored with the client, to protect against tampering. For production environments you should generate it with a cryptographically secure random generator. This parameter may be omitted, in which case the registry automatically generates a secret at launch. **WARNING:** If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries. |
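For example, one way to generate a suitable secret on most systems (a sketch,
assuming `openssl` is available and using the `REGISTRY_` environment-variable
override convention shown in the deployment examples later in this document):

    export REGISTRY_HTTP_SECRET=$(openssl rand -hex 32)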
### tls

The `tls` struct within `http` is **optional**. Use this to configure TLS for
the server. If you already have a server such as Nginx or Apache running on
the same host as the registry, you may prefer to configure TLS termination
there and proxy connections to the registry server.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `certificate` | yes | Absolute path to the x509 certificate file. |
| `key` | yes | Absolute path to the x509 private key file. |
| `clientcas` | no | An array of absolute paths to x509 CA files. |
### debug

The `debug` option is **optional**. Use it to configure a debug server that
can be helpful in diagnosing problems. The debug endpoint can be used for
monitoring registry metrics and health, as well as profiling. Sensitive
information may be available via the debug endpoint. Please be certain that
access to the debug endpoint is locked down in a production environment.

The `debug` section takes a single, required `addr` parameter. This parameter
specifies the `HOST:PORT` on which the debug server should accept connections.

### headers

The `headers` option is **optional**. Use it to specify headers that the HTTP
server should include in responses. This can be used for security headers
such as `Strict-Transport-Security`.

The `headers` option should contain an option for each header to include,
where the parameter name is the header's name and the parameter value is a
list of the header's payload values.

Including `X-Content-Type-Options: [nosniff]` is recommended, so that
browsers will not interpret content as HTML if they are directed to load a
page from the registry. This header is included in the example configuration
files.

## notifications

    notifications:
      endpoints:
        - name: alistener
          disabled: false
          url: https://my.listener.com/event
          headers:
          timeout: 500
          threshold: 5
          backoff: 1000

The `notifications` option is **optional** and currently may contain a single
option, `endpoints`.

### endpoints

Endpoints is a list of named services (URLs) that can accept event
notifications.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `name` | yes | A human-readable name for the service. |
| `disabled` | no | A boolean to enable/disable notifications for a service. |
| `url` | yes | The URL to which events should be published. |
| `headers` | yes | Static headers to add to each request. Each header's name is a key underneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. |
| `timeout` | yes | An HTTP timeout value: a positive integer with an optional time-unit suffix (`ns`, `us`, `ms`, `s`, `m`, `h`). If you omit the suffix, the value is interpreted as nanoseconds. |
| `threshold` | yes | An integer specifying how many failures must occur before backing off. |
| `backoff` | yes | How long the system backs off before retrying: a positive integer with an optional time-unit suffix, in the same format as `timeout`. |
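Because a bare integer is read as nanoseconds, it is usually safer to spell
out the unit. A sketch of an endpoint with explicit durations (the name and
URL come from the example above):

    notifications:
      endpoints:
        - name: alistener
          url: https://my.listener.com/event
          timeout: 500ms
          threshold: 5
          backoff: 1s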
## redis

    redis:
      addr: localhost:6379
      password: asecret
      db: 0
      dialtimeout: 10ms
      readtimeout: 10ms
      writetimeout: 10ms
      pool:
        maxidle: 16
        maxactive: 64
        idletimeout: 300s

Declare parameters for constructing the redis connections. Registry instances
may use the Redis instance for several applications; the current purpose is
caching information about immutable blobs. Most of the options below control
how the registry connects to redis. You can control the pool's behavior with
the [pool](#pool) subsection.

It's advisable to configure Redis itself with the **allkeys-lru** eviction
policy, because the registry does not set an expiration value on keys.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `addr` | yes | Address (host and port) of the redis instance. |
| `password` | no | A password used to authenticate to the redis instance. |
| `db` | no | Selects the db for each connection. |
| `dialtimeout` | no | Timeout for connecting to the redis instance. |
| `readtimeout` | no | Timeout for reading from redis connections. |
| `writetimeout` | no | Timeout for writing to redis connections. |
### pool

    pool:
      maxidle: 16
      maxactive: 64
      idletimeout: 300s

Configure the behavior of the Redis connection pool.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `maxidle` | no | Sets the maximum number of idle connections. |
| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |
## health

    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3
      file:
        - file: /path/to/checked/file
          interval: 10s
      http:
        - uri: http://server.to.check/must/return/200
          headers:
            Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==]
          statuscode: 200
          timeout: 3s
          interval: 10s
          threshold: 3
      tcp:
        - addr: redis-server.domain.com:6379
          timeout: 3s
          interval: 10s
          threshold: 3

The `health` option is **optional**. It may contain preferences for a
periodic health check on the storage driver's backend storage, and optional
periodic checks on local files, HTTP URIs, and/or TCP servers. The results of
the health checks are available at `/debug/health` on the debug HTTP server,
if the debug HTTP server is enabled (see the [http](#http) section).

### storagedriver

The `storagedriver` subsection contains options for a health check on the
configured storage driver's backend storage. `enabled` must be set to `true`
for this health check to be active.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `enabled` | yes | Set to `true` to enable the storage driver health check, or `false` to disable it. |
| `interval` | no | The length of time to wait between repetitions of the check: a positive integer with an optional time-unit suffix (`ns`, `us`, `ms`, `s`, `m`, `h`). A bare integer is interpreted as nanoseconds. Defaults to 10 seconds if omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before it triggers an unhealthy state. If this field is not specified, a single failure triggers an unhealthy state. |
### file

The `file` subsection is a list of paths to be periodically checked for the
existence of a file. If a file exists at the given path, the health check
will fail. This can be used as a way of bringing a registry out of rotation
by creating a file.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `file` | yes | The path to check for the existence of a file. |
| `interval` | no | The length of time to wait between repetitions of the check: a positive integer with an optional time-unit suffix (`ns`, `us`, `ms`, `s`, `m`, `h`). A bare integer is interpreted as nanoseconds. Defaults to 10 seconds if omitted. |
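For example, with a check like the following configured (the path is
illustrative):

    health:
      file:
        - file: /etc/registry/disable-me
          interval: 10s

creating the file takes this instance out of rotation, and removing it brings
the instance back:

    touch /etc/registry/disable-me   # health check starts failing
    rm /etc/registry/disable-me      # health check passes again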
### http

The `http` subsection is a list of HTTP URIs to be periodically checked with
`HEAD` requests. If a `HEAD` request doesn't complete or returns an
unexpected status code, the health check will fail.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `uri` | yes | The URI to check. |
| `headers` | no | Static headers to add to each request. Each header's name is a key underneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. |
| `statuscode` | no | Expected status code from the HTTP URI. Defaults to `200`. |
| `timeout` | no | The length of time to wait before timing out the HTTP request: a positive integer with an optional time-unit suffix (`ns`, `us`, `ms`, `s`, `m`, `h`). A bare integer is interpreted as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check, in the same format as `timeout`. Defaults to 10 seconds if omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before it triggers an unhealthy state. If this field is not specified, a single failure triggers an unhealthy state. |
### tcp

The `tcp` subsection is a list of TCP addresses to be periodically checked
with connection attempts. The addresses must include port numbers. If a
connection attempt fails, the health check will fail.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `addr` | yes | The TCP address to connect to, including a port number. |
| `timeout` | no | The length of time to wait before timing out the TCP connection: a positive integer with an optional time-unit suffix (`ns`, `us`, `ms`, `s`, `m`, `h`). A bare integer is interpreted as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check, in the same format as `timeout`. Defaults to 10 seconds if omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before it triggers an unhealthy state. If this field is not specified, a single failure triggers an unhealthy state. |
## Proxy

    proxy:
      remoteurl: https://registry-1.docker.io
      username: [username]
      password: [password]

The `proxy` option enables a registry to be configured as a pull-through
cache of the official Docker Hub. See [mirror](mirror.md) for more
information. Pushing to a registry configured as a pull-through cache is
currently unsupported.
| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `remoteurl` | yes | The URL of the official Docker Hub. |
| `username` | no | The username of the Docker Hub account. |
| `password` | no | The password for the official Docker Hub account. |
To enable pulling private repositories (e.g. `batman/robin`), a username and
password for user `batman` must be specified. Note: these private
repositories will be stored in the proxy cache's storage, and relevant
measures should be taken to protect access to it.

## Example: Development configuration

The following is a simple example you can use for local development:

    version: 0.1
    log:
      level: debug
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: localhost:5000
      secret: asecretforlocaldevelopment
      debug:
        addr: localhost:5001

The above configures the registry instance to run on port `5000`, binding to
`localhost`, with the `debug` server enabled. Registry data storage is in the
`/var/lib/registry` directory. Logging is in `debug` mode, which is the most
verbose.

A similar simple configuration is available at
[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml).
Both are generally useful for local development.

## Example: Middleware configuration

This example illustrates how to configure storage middleware in a registry.
Middleware allows the registry to serve layers via a content delivery network
(CDN). This is useful for reducing requests to the storage layer.

Currently, the registry supports [Amazon
Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront
in conjunction with the S3 storage driver.
| Parameter | Description |
| --------- | ----------- |
| `name` | The storage middleware name. Currently `cloudfront` is an accepted value. |
| `disabled` | Set to `false` to easily disable the middleware. |
| `options` | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair), and `duration` (the duration in minutes for which the URL is valid; default is 20). |
The following example illustrates these values:

    middleware:
      storage:
        - name: cloudfront
          disabled: false
          options:
            baseurl: http://d111111abcdef8.cloudfront.net
            privatekey: /path/to/asecret.pem
            keypairid: asecret
            duration: 60

>**Note**: Cloudfront keys exist separately from other AWS keys. See
>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
>for more information.

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md
new file mode 100644
index 00000000..77e8f05c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md
@@ -0,0 +1,231 @@

# Deploying a registry server

You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/).

## Running on localhost

Start your registry:

    docker run -d -p 5000:5000 --restart=always --name registry registry:2

You can now use it with docker.

Get any image from the hub and tag it to point to your registry:

    docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu

... then push it to your registry:

    docker push localhost:5000/ubuntu

... then pull it back from your registry:

    docker pull localhost:5000/ubuntu

To stop your registry:

    docker stop registry && docker rm -v registry

## Storage

By default, your registry data is persisted as a
[docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host
filesystem. Properly understanding volumes is essential if you want to stick
with local filesystem storage.

Specifically, you might want to point your volume location to a specific
place in order to more easily access your registry data. To do so you can:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/data:/var/lib/registry \
      registry:2

### Alternatives

You should usually consider using
[another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md)
instead of the local filesystem. Use the
[storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage)
to configure an alternate storage backend.

Using one of these allows you to more easily scale your registry and to
leverage your storage's redundancy and availability features.

## Running a domain registry

While running on `localhost` has its uses, most people want their registry to
be more widely available. To do so, the Docker engine requires you to secure
it using TLS, which is conceptually very similar to configuring your web
server with SSL.

### Get a certificate

Assuming that you own the domain `myregistrydomain.com`, and that its DNS
record points to the host where you are running your registry, you first need
to get a certificate from a CA.

Create a `certs` directory:

    mkdir -p certs

Then move and/or rename your crt file to `certs/domain.crt` and your key file
to `certs/domain.key`.
Make sure you have stopped your registry from the previous steps, then start
your registry again with TLS enabled:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2

You should now be able to access your registry from another docker host:

    docker pull ubuntu
    docker tag ubuntu myregistrydomain.com:5000/ubuntu
    docker push myregistrydomain.com:5000/ubuntu
    docker pull myregistrydomain.com:5000/ubuntu

#### Gotcha

A certificate issuer may supply you with an *intermediate* certificate. In
this case, you must combine your certificate with the intermediate's to form
a *certificate bundle*. You can do this using the `cat` command:

    cat domain.crt intermediate-certificates.pem > certs/domain.crt

### Alternatives

While rarely advisable, you may want to use self-signed certificates instead,
or use your registry in an insecure fashion. You will find instructions
[here](insecure.md).

## Load Balancing Considerations

One may want to use a load balancer to distribute load, terminate TLS or
provide high availability. While a full load balancing setup is outside the
scope of this document, there are a few considerations that can make the
process smoother.

The most important aspect is that a load-balanced cluster of registries must
share the same resources. For the current version of the registry, this means
the following must be the same:

  - Storage Driver
  - HTTP Secret
  - Redis Cache (if configured)

If any of these are different, the registry will have trouble serving
requests. As an example, if you're using the filesystem driver, all registry
instances must have access to the same filesystem root, which means they
should be on the same machine. For other drivers, such as s3 or azure, they
should be accessing the same resource, and will likely share an identical
configuration. The _HTTP Secret_ coordinates uploads, so it also must be the
same across instances. Configuring different redis instances will work (at
the time of writing), but will not be optimal if the instances are not
shared, because more requests will be directed to the backend.

Getting the headers correct is very important. For all responses to any
request under the "/v2/" url space, the `Docker-Distribution-API-Version`
header should be set to the value "registry/2.0", even for a 4xx response.
This header allows the docker engine to quickly resolve authentication realms
and fall back to version 1 registries, if necessary. Confirming this is set
up correctly can help avoid problems with fallback.

In the same train of thought, you must make sure you are properly sending the
`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their
"client-side" values. Failure to do so usually makes the registry issue
redirects to internal hostnames or downgrade from https to http.

A properly secured registry should return 401 when the "/v2/" endpoint is hit
without credentials. The response should include a `WWW-Authenticate`
challenge, providing guidance on how to authenticate, such as with basic auth
or a token service. If the load balancer has health checks, it is recommended
to configure it to consider a 401 response as healthy and any other as down.
This will secure your registry by ensuring that configuration problems with
authentication don't accidentally expose an unprotected registry.
If you're using a less sophisticated load balancer, such as Amazon's Elastic
Load Balancer, that doesn't allow you to change the healthy response code,
health checks can be directed at "/", which will always return a `200 OK`
response.

## Restricting access

Except for registries running on secure local networks, registries should
always implement access restrictions.

### Native basic auth

The simplest way to achieve access restriction is through basic
authentication (this is very similar to other web servers' basic
authentication mechanisms).

> **Warning**: You **cannot** use authentication with an insecure registry.
> You have to [configure TLS first](#running-a-domain-registry) for this to
> work.

First create a password file with one entry for the user "testuser", with
password "testpassword":

    mkdir auth
    docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd

Make sure you have stopped your registry from the previous step, then start
it again:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/auth:/auth \
      -e "REGISTRY_AUTH=htpasswd" \
      -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
      -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2

You should now be able to:

    docker login myregistrydomain.com:5000

and then push and pull images as an authenticated user.

#### Gotcha

Seeing x509 errors is usually a sign that you are trying to use self-signed
certificates and failed to
[configure your docker daemon properly](insecure.md).

### Alternatives

1. You may want to leverage more advanced basic auth implementations through
   a proxy design in front of the registry. You will find examples of such
   patterns in the [recipes list](recipes.md).

2. Alternatively, the Registry also supports delegated authentication,
   redirecting users to a specific, trusted token server. That approach
   requires significantly more investment, and only makes sense if you want
   to fully configure ACLs and have more control over the Registry's
   integration into your global authorization and authentication systems.

You will find [background information here](spec/auth/token.md), and
[configuration information here](configuration.md#auth).

Beware that you will have to implement your own authentication service for
this to work, or leverage a third-party implementation.

## Managing with Compose

As your registry configuration grows more complex, dealing with it can
quickly become tedious.

It's highly recommended to use
[Docker Compose](https://docs.docker.com/compose/) to facilitate operating
your registry.

Here is a simple `docker-compose.yml` example that condenses everything
explained so far:

```
registry:
  restart: always
  image: registry:2
  ports:
    - 5000:5000
  environment:
    REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
    REGISTRY_HTTP_TLS_KEY: /certs/domain.key
    REGISTRY_AUTH: htpasswd
    REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
  volumes:
    - /path/data:/var/lib/registry
    - /path/certs:/certs
    - /path/auth:/auth
```

> **Warning**: replace `/path` with whatever directory holds your `certs`
> and `auth` folders from above.
You can then start your registry with a simple:

    docker-compose up -d

## Next

You will find more specific and advanced information in the following
sections:

  - [Configuration reference](configuration.md)
  - [Working with notifications](notifications.md)
  - [Advanced "recipes"](recipes.md)
  - [Registry API](spec/api.md)
  - [Storage driver model](storagedrivers.md)
  - [Token authentication](spec/auth/token.md)

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md
new file mode 100644
index 00000000..8159b520
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md
@@ -0,0 +1,70 @@

# Glossary

This page contains definitions for distribution-related terms.
**Blob**

A blob is any kind of content that is stored by a Registry under a
content-addressable identifier (a "digest"). Layers are a good example of
"blobs".

**Image**

An image is a named set of immutable data from which a Docker container can
be created. An image is represented by a JSON file called a manifest, and is
conceptually a set of layers. Image names indicate the location from which
they can be pulled and to which they can be pushed, as they usually start
with a registry domain name and port.

**Layer**

A layer is a tar archive bundling partial content from a filesystem. Layers
from an image are usually extracted in order, on top of each other, to make
up the root filesystem from which containers run.

**Manifest**

A manifest is the JSON representation of an image.

**Namespace**

A namespace is a collection of repositories with a common name prefix. The
namespace with an empty prefix is considered the Global Namespace.

**Registry**

A registry is a service that lets you store and deliver images.

**Repository**

A repository is a set of data containing all versions of a given image.

**Scope**

A scope is the portion of a namespace onto which a given authorization token
is granted.

**Tag**

A tag is conceptually a "version" of a named image. Example: `docker pull
myimage:latest` instructs docker to pull the image "myimage" in version
"latest".
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md new file mode 100644 index 00000000..c6ac7ad9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md @@ -0,0 +1,24 @@ + + +# Getting help + +If you need help, or just want to chat, you can reach us: + +- on irc: `#docker-distribution` on freenode +- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) + +If you want to report a bug: + +- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) +- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) + +You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/opensource/get-help/). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy new file mode 100644 index 00000000..5ecf4c3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rectangle","order":2,"lockA
spectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png new file mode 100644 index 00000000..09de8d23 Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png differ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg new file mode 100644 index 00000000..6c3d680b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg @@ -0,0 +1 @@ +Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md new file mode 100644 index 00000000..6cd6769e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md @@ -0,0 +1,66 @@ + + +# Docker Registry + +## What it is + +The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. +The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). 
+ +## Why use it + +You should use the Registry if you want to: + + * tightly control where your images are being stored + * fully own your images distribution pipeline + * integrate image storage and distribution tightly into your in-house development workflow + +## Alternatives + +Users looking for a zero-maintenance, ready-to-go solution are encouraged to head over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). + +Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/). + +## Requirements + +The Registry is compatible with Docker engine **version 1.6.0 or higher**. +If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry). + +## TL;DR + +Start your registry + + docker run -d -p 5000:5000 --name registry registry:2 + +Pull (or build) some image from the hub + + docker pull ubuntu + +Tag the image so that it points to your registry + + docker tag ubuntu localhost:5000/myfirstimage + +Push it + + docker push localhost:5000/myfirstimage + +Pull it back + + docker pull localhost:5000/myfirstimage + +Now stop your registry and remove all data + + docker stop registry && docker rm -v registry + +## Next + +You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md). + \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md new file mode 100644 index 00000000..9ccb5419 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md @@ -0,0 +1,90 @@ + + +# Insecure Registry + +While it's highly recommended to secure your registry using a TLS certificate issued by a known CA, you may alternatively decide to use self-signed certificates, or even use your registry over plain http. + +You should understand the downsides of doing so, and the extra configuration burden it brings. + +## Deploying a plain HTTP registry + +> **Warning**: it's not possible to use an insecure registry with basic authentication + +This basically tells Docker to entirely disregard security for your registry. + +1. Edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to existing `DOCKER_OPTS`) +2. Restart your Docker daemon: on Ubuntu, this is usually `service docker stop && service docker start` + +**Pros:** + + - relatively easy to configure + +**Cons:** + + - this is **very** insecure: you are basically exposing yourself to trivial MITM, and this solution should only be used for isolated testing or in a tightly controlled, air-gapped environment + - you have to configure every docker daemon that wants to access your registry + +## Using self-signed certificates + +> **Warning**: using this along with basic authentication requires you to **also** trust the certificate at the OS level for some versions of docker (see below) + +Generate your own certificate: + + mkdir -p certs && openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ + -x509 -days 365 -out certs/domain.crt + +Be sure to use the name `myregistrydomain.com` as a CN. 
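For illustration, the resulting certificate can be fed to the official image through its TLS environment variables. This is only a sketch, assuming the `registry:2` image and the `certs` directory created above; the deployment guide linked just below remains the canonical reference:

    docker run -d -p 5000:5000 --name registry \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2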
+ +Use the result to [start your registry with TLS enabled](https://github.com/docker/distribution/blob/master/docs/deploying.md#get-a-certificate) + +Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`. + +Don't forget to restart docker after doing so. + +**Pros:** + + - more secure than the insecure registry solution + +**Cons:** + + - you have to configure every docker daemon that wants to access your registry + +## Failing... + +Failing to configure docker and trying to pull from a registry that is not using TLS will result in the following message: + +``` +FATA[0000] Error response from daemon: v1 ping attempt failed with error: +Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. +If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add +`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. +In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; +simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt +``` + +## Docker still complains about the certificate when using authentication? + +When using authentication, some versions of docker also require you to trust the certificate at the OS level. + +Usually, on Ubuntu this is done with: + + cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt + update-ca-certificates + +... and on Red Hat (and its derivatives) with: + + cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt + update-ca-trust + +... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled: + + update-ca-trust enable + +Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md new file mode 100644 index 00000000..aefefc34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md @@ -0,0 +1,55 @@ + + +# Understanding the Registry + +A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. + + > Example: the image `distribution/registry`, with tags `2.0` and `2.1`. + +Users interact with a registry by using docker push and pull commands. + + > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. + +Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, Ceph Rados, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). + +Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. + +The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. 
+ +Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics. + +## Understanding image naming + +Image names as used in typical docker commands reflect their origin: + + * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command + * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar` + +You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/). + +## Use cases + +Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. + +It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. + +Finally, it's the best way to distribute images inside an isolated network. + +## Requirements + +You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking. + +Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. A strong understanding of http and overall network communications, plus familiarity with golang, is certainly useful as well for advanced operations or hacking. + +## Next + +Dive into [deploying your registry](deploying.md). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md new file mode 100644 index 00000000..da0aba91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md @@ -0,0 +1,30 @@ + + +# Migrating a 1.0 registry to 2.0 + +TODO: This needs to be revised in light of Olivier's work + +A few thoughts here: + +There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. +The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. +One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. + +----- + +The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process: + +1. Configure and test a 2.0 registry image in a sandbox environment. + +2. Back up your production image storage. + + Your production image storage should reside on a volume or storage backend. + Make sure you have a backup of its contents. + +3. 
Stop your existing registry service. + +4. Restart your registry with your tested 2.0 image. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md new file mode 100644 index 00000000..feb2630c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md @@ -0,0 +1,72 @@ + + +# Registry as a pull through cache + +## Use-case + +If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have, it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network. + +### Alternatives + +Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry. + +Furthermore, if your images are all built in-house, the simplest scenario is to skip the Hub entirely and rely solely on your local registry. + +### Gotcha + +It's currently not possible to mirror another private registry. Only the central Hub can be mirrored. + +### Solution + +The Registry can be configured as a pull through cache. In this mode, a Registry responds to all normal docker pull requests but stores all content locally. + +## How does it work? + +The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. + +### What if the content changes on the Hub? + +When a pull is attempted with a tag, the Registry will check the remote to determine whether it has the latest version of the requested content. If it doesn't, it will fetch the latest content and cache it. + +### What about my disk? + +In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache, the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. + +To ensure the best performance and guarantee correctness, the Registry cache should be configured to use the `filesystem` driver for storage. + +## Running a Registry as a pull through cache + +The easiest way to run a registry as a pull through cache is to run the official Registry image. + +Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. + +### Configuring the cache + +To configure a Registry to run as a pull through cache, you must add a `proxy` section to the config file. + +In order to access private images on the Docker Hub, a username and password can be supplied. + + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + +> **Warning**: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private! 
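For reference, here is a minimal sketch of a complete pull-through-cache `config.yml`; everything outside the `proxy` section (log level, storage root, listen address) is an illustrative assumption, not a requirement:

    version: 0.1
    log:
      level: info
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: 0.0.0.0:5000
    proxy:
      remoteurl: https://registry-1.docker.io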
+ +### Configuring the Docker daemon + +You will need to pass the `--registry-mirror` option to your Docker daemon on startup: + + docker --registry-mirror=https:// daemon + +For example, if your mirror is serving on http://10.0.0.2:5000, you would run: + + docker --registry-mirror=https://10.0.0.2:5000 daemon + +NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md new file mode 100644 index 00000000..17b92f44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md @@ -0,0 +1,188 @@ + + +# Authenticating proxy with nginx + + +## Use-case + +People already relying on an nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. + +Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal http portal. + +### Alternatives + +If you just want authentication for your registry, and are happy maintaining users' access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). + +### Solution + +With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. + +While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example. + +We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your needs. + +### Gotchas + +While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. + +Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required. + +For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client headers: + +``` +X-Real-IP +X-Forwarded-For +X-Forwarded-Proto +``` + +So if you have an nginx sitting behind it, you should remove these lines from the example config below: + +``` +X-Real-IP $remote_addr; # pass on real client's IP +X-Forwarded-For $proxy_add_x_forwarded_for; +X-Forwarded-Proto $scheme; +``` + +Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970). + +## Setting things up + +Read again [the requirements](recipes.md#requirements). + +Ready? + +-- + +Create the required directories + +``` +mkdir -p auth +mkdir -p data +``` + +Create the main nginx configuration you will use. + +``` + +cat <<EOF > auth/nginx.conf +events { + worker_connections 1024; +} + +http { + + upstream docker-registry { + server registry:5000; + } + + ## Set a variable to help us decide if we need to add the + ## 'Docker-Distribution-Api-Version' header. + ## The registry always sets this header. + ## In the case of nginx performing auth, the header will be unset + ## since nginx is auth-ing before proxying. 
+ map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version { + 'registry/2.0' ''; + default registry/2.0; + } + + server { + listen 443 ssl; + server_name myregistrydomain.com; + + # SSL + ssl_certificate /etc/nginx/conf.d/domain.crt; + ssl_certificate_key /etc/nginx/conf.d/domain.key; + + # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html + ssl_protocols TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL:10m; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting. + auth_basic "Registry realm"; + auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; + + ## If \$docker_distribution_api_version is empty, the header will not be added. + ## See the map directive above where this variable is defined. + add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always; + + proxy_pass http://docker-registry; + proxy_set_header Host \$http_host; # required for docker client's sake + proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$scheme; + proxy_read_timeout 900; + } + } +} +EOF +``` + +Now create a password file for "testuser" and "testpassword" + +``` +docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd +``` + +Copy over your certificate files + +``` +cp domain.crt auth +cp domain.key auth +``` + +Now create your compose file + +``` +cat <<EOF > docker-compose.yml +nginx: + image: "nginx:1.9" + ports: + - 5043:443 + links: + - registry:registry + volumes: + - ./auth:/etc/nginx/conf.d + - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro + +registry: + image: registry:2 + ports: + - 127.0.0.1:5000:5000 + volumes: + - `pwd`/data:/var/lib/registry +EOF +``` + +## Starting and stopping + +Now, start your stack: + + docker-compose up -d + +Log in with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image: + + docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043 + docker tag ubuntu myregistrydomain.com:5043/test + docker push myregistrydomain.com:5043/test + docker pull myregistrydomain.com:5043/test diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md new file mode 100644 index 00000000..a832239d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md @@ -0,0 +1,320 @@ + + +# Notifications + +The Registry supports sending webhook notifications in response to events +happening within the registry. Notifications are sent in response to manifest +pushes and pulls and layer pushes and pulls. These actions are serialized into +events. The events are fed into a registry-internal broadcast system, which +queues and dispatches events to [_Endpoints_](#endpoints). 
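To make the receiving end concrete, here is a minimal sketch of an endpoint listener written in Go; the `/callback` path and port are illustrative assumptions, and the envelope structure it decodes is described under [Envelope](#envelope) below:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// envelope mirrors the notification envelope: a list of events.
// Events are kept raw here; decode them further as needed.
type envelope struct {
	Events []json.RawMessage `json:"events"`
}

func main() {
	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
		var env envelope
		if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("received %d event(s)", len(env.Events))
		// Any 2xx (or 3xx) response tells the registry the delivery succeeded.
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```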
+ +![](images/notifications.png) + +## Endpoints + +Notifications are sent to _endpoints_ via HTTP requests. Each configured +endpoint has isolated queues, retry configuration and http targets within each +instance of a registry. When an action happens within the registry, it is +converted into an event which is dropped into an inmemory queue. When the +event reaches the end of the queue, an http request is made to the endpoint +until the request succeeds. The events are sent serially to each endpoint but +order is not guaranteed. + +## Configuration + +To set up a registry instance to send notifications to endpoints, one must add +them to the configuration. A simple example follows: + + notifications: + endpoints: + - name: alistener + url: https://mylistener.example.com/event + headers: + Authorization: [Bearer ] + timeout: 500ms + threshold: 5 + backoff: 1s + +The above would configure the registry with an endpoint to send events to +`https://mylistener.example.com/event`, with the header "Authorization: Bearer +". The request would time out after 500 milliseconds. If +5 failures happen consecutively, the registry will back off for 1 second before +trying again. + +For details on the fields, please see the [configuration documentation](configuration.md#notifications). + +A properly configured endpoint should lead to a log message from the registry +upon startup: + +``` +INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer ]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry +``` + +## Events + +Events have a well-defined JSON structure and are sent as the body of +notification requests. One or more events are sent in a structure called an +envelope. Each event has a unique id that can be used to identify incoming +requests, if required. Along with that, an _action_ is provided with a +_target_, identifying the object mutated during the event. + +The fields available in an event are described in detail in the +[godoc](http://godoc.org/github.com/docker/distribution/notifications#Event). + +**TODO:** Let's break out the fields here rather than rely on the godoc. + +The following is an example of a JSON event, sent in response to the push of a +manifest: + +```json +{ + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+json", + "size": 1, + "digest": "sha256:0123456789abcdef0", + "length": 1, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } +} +``` + +> __NOTE:__ As of version 2.1, the `length` field for event targets +> is being deprecated for the `size` field, bringing the target in line with +> common nomenclature. Both will continue to be set for the foreseeable +> future. Newer code should favor `size` but accept either. + +## Envelope + +The envelope contains one or more events, with the following json structure: + +```json +{ + "events": [ ... ] +} +``` + +While events may be sent in the same envelope, the set of events within that +envelope has no implied relationship. 
For example, the registry may choose to +group unrelated events and send them in the same envelope to reduce the total +number of requests. + +The full package has the mediatype +"application/vnd.docker.distribution.events.v1+json", which will be set on the +request coming to an endpoint. + +An example of a full envelope may look as follows: + +```json +POST /callback +Host: mylistener.example.com +Authorization: Bearer +Content-Type: application/vnd.docker.distribution.events.v1+json + +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+json", + "length": 1, + "digest": "sha256:0123456789abcdef0", + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "length": 2, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "length": 3, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} +``` + +## Responses + +The registry is fairly accepting of the response codes from endpoints. If an +endpoint responds with any 2xx or 3xx response code (after following +redirects), the message will be considered delivered and discarded. + +In turn, it is recommended that endpoints are accepting of incoming requests, +as well. While the format of event envelopes is standardized by media type, +any "pickiness" about validation may cause the queue to back up on the +registry. + +## Monitoring + +The state of the endpoints is reported via the debug/vars http interface, +usually configured to `http://localhost:5001/debug/vars`. Information such as +configuration and metrics are available by endpoint. 
+ +The following provides an example of a few endpoints that have experienced +several failures and have since recovered: + +```json +"notifications":{ + "endpoints":[ + { + "name":"local-5003", + "url":"http://localhost:5003/callback", + "Headers":{ + "Authorization":[ + "Bearer \u003can example token\u003e" + ] + }, + "Timeout":1000000000, + "Threshold":10, + "Backoff":1000000000, + "Metrics":{ + "Pending":76, + "Events":76, + "Successes":0, + "Failures":0, + "Errors":46, + "Statuses":{ + + } + } + }, + { + "name":"local-8083", + "url":"http://localhost:8083/callback", + "Headers":null, + "Timeout":1000000000, + "Threshold":10, + "Backoff":1000000000, + "Metrics":{ + "Pending":0, + "Events":76, + "Successes":76, + "Failures":0, + "Errors":28, + "Statuses":{ + "202 Accepted":76 + } + } + } + ] +} +``` + +If using notifications as part of a larger application, it is _critical_ to +monitor the size ("Pending" above) of the endpoint queues. If failures or +queue sizes are increasing, it can indicate a larger problem. + +The logs are also a valuable resource for monitoring problems. A failing +endpoint will lead to messages similar to the following: + +``` +ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying +WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off +``` + +The above indicates that several errors have led to a backoff, and the registry +will wait before retrying. + +## Considerations + +Currently, the queues are inmemory, so endpoints should be _reasonably +reliable_. They are designed to make a best effort to send the messages, but if +an instance is lost, messages may be dropped. If an endpoint goes down, care +should be taken to ensure that the registry instance is not terminated before +the endpoint comes back up, or messages will be lost. + +This can be mitigated by running endpoints in close proximity to the registry +instances. One could run an endpoint that pages to disk and then forwards a +request to provide better durability. + +The notification system is designed around a series of interchangeable _sinks_ +which can be wired up to achieve interesting behavior. If this system doesn't +provide acceptable guarantees, adding a transactional `Sink` to the registry +is a possibility, although it may have an effect on request service time. +Please see the +[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) +for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md new file mode 100644 index 00000000..15a26ff4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md @@ -0,0 +1,79 @@ + + +# OS X Setup Guide + +## Use-case + +This is useful if you intend to run a registry server natively on OS X. + +### Alternatives + +You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM. + +The simplest road to get there is traditionally to use the [Docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](https://docs.docker.com/machine/), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM. 
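As a sketch of that alternative (assuming docker-machine and VirtualBox are installed; the machine name is arbitrary):

    docker-machine create -d virtualbox registry-vm
    eval "$(docker-machine env registry-vm)"
    docker run -d -p 5000:5000 --name registry registry:2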
+ +### Solution + +Using the method described here, you install and compile your own registry binary from the git repository and run it as an OS X launch agent. + +### Gotchas + +Running production services on OS X is out of scope for this document. Be sure you understand these aspects well before considering going to production with this. + +## Setup golang on your machine + +If you already know how, safely skip to the next section. + +If you don't, the TLDR is: + + bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) + source ~/.gvm/scripts/gvm + gvm install go1.4.2 + gvm use go1.4.2 + +If you want to understand more, you should read [How to Write Go Code](https://golang.org/doc/code.html). + +## Check out the Docker Distribution source tree + + mkdir -p $GOPATH/src/github.com/docker + git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution + cd $GOPATH/src/github.com/docker/distribution + +## Build the binary + + GOPATH=$(pwd)/Godeps/_workspace:$GOPATH make binaries + sudo cp bin/registry /usr/local/libexec/registry + +## Setup + +Copy the registry configuration file into place: + + mkdir /Users/Shared/Registry + cp docs/osx/config.yml /Users/Shared/Registry/config.yml + +## Running the Docker Registry under launchd + +Copy the Docker registry plist into place: + + plutil -lint docs/osx/com.docker.registry.plist + cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/ + chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist + +Start the Docker registry: + + launchctl load ~/Library/LaunchAgents/com.docker.registry.plist + +### Restarting the docker registry service + + launchctl stop com.docker.registry + launchctl start com.docker.registry + +### Unloading the docker registry service + + launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist new file mode 100644 index 00000000..0982349f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist @@ -0,0 +1,42 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>Label</key> + <string>com.docker.registry</string> + <key>KeepAlive</key> + <true/> + <key>StandardErrorPath</key> + <string>/Users/Shared/Registry/registry.log</string> + <key>StandardOutPath</key> + <string>/Users/Shared/Registry/registry.log</string> + <key>Program</key> + <string>/usr/local/libexec/registry</string> + <key>ProgramArguments</key> + <array> + <string>/usr/local/libexec/registry</string> + <string>/Users/Shared/Registry/config.yml</string> + </array> + <key>Sockets</key> + <dict> + <key>http-listen-address</key> + <dict> + <key>SockServiceName</key> + <string>5000</string> + <key>SockType</key> + <string>dgram</string> + <key>SockFamily</key> + <string>IPv4</string> + </dict> + <key>http-debug-address</key> + <dict> + <key>SockServiceName</key> + <string>5001</string> + <key>SockType</key> + <string>dgram</string> + <key>SockFamily</key> + <string>IPv4</string> + </dict> + </dict> +</dict> +</plist> diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml new file mode 100644 index 00000000..63b8f713 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml @@ -0,0 +1,16 @@ +version: 0.1 +log: + level: info + fields: + service: registry + environment: macbook-air +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /Users/Shared/Registry +http: + addr: 0.0.0.0:5000 + secret: mytokensecret + debug: + addr: localhost:5001 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md new file mode 100644 index 00000000..0b4a4ab5 --- /dev/null +++
b/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md @@ -0,0 +1,35 @@ + + +# Recipes + +You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases. + +Most users are not expected to have a use for these. + +## Requirements + +You should have followed the basic [deployment guide](deploying.md) in its entirety. + +If you have not, please take the time to do so. + +At this point, it's assumed that: + + * you understand Docker security requirements, and how to configure your docker engines properly + * you have installed Docker Compose + * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates + * inside the current directory, you have an X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com` + * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`) + +## The List + + * [using Apache as an authenticating proxy](apache.md) + * [using Nginx as an authenticating proxy](nginx.md) + * [running a Registry on OS X](osx-setup-guide.md) + * [hacking the registry: build instructions](building.md) + * [mirror the Docker Hub](mirror.md) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md new file mode 100644 index 00000000..d46ed93c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md @@ -0,0 +1,4817 @@ + + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. The specification covers the operation of version 2 +of this API, known as _Docker Registry HTTP API V2_. + +While the V1 registry protocol is usable, there are several problems with the +architecture that have led to this new version. The main driver of this +specification is a set of changes to the docker image format, covered in +[docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image +definition and improves security. This specification will build on that work, +leveraging new properties of the manifest format to improve performance, +reduce bandwidth usage and decrease the likelihood of backend corruption. + +For relevant details and history leading up to this specification, please see +the following issues: + +- [docker/docker#8093](https://github.com/docker/docker/issues/8093) +- [docker/docker#9015](https://github.com/docker/docker/issues/9015) +- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) + +### Scope + +This specification covers the URL layout and protocols of the interaction +between docker registry and docker core. This will affect the docker core +registry API and the rewrite of docker-registry. Docker registry +implementations may implement other API endpoints, but they are not covered by +this specification.
+ +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. + +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run a verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to the docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems, but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that it's +not necessary because the layer is already known. + +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here.
+ +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+ +**f** + +- Specify the delete API for layers and manifests. + +**e** + +- Added support for listing registry contents. +- Added pagination to tags API. +- Added common approach to support pagination. + +**d** + +- Allow repository name components to be one character. +- Clarified that single component names are allowed. + +**c** + +- Added section covering digest format. +- Added more clarification that manifest cannot be deleted by tag. + +**b** + +- Added capability of doing streaming upload to PATCH blob upload. +- Updated PUT blob upload to no longer take final chunk, now requires entire data or no data. +- Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload. + +**a** + +- Added support for immutable manifest references in manifest endpoints. +- Deleting a manifest by tag has been deprecated. +- Specified `Docker-Content-Digest` header for appropriate entities. +- Added error code for unsupported operations. +
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2/<name>/ + +For example, for an API endpoint that works with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in a variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must consist of one or more lowercase alpha-numeric + characters, optionally separated by periods, dashes or underscores. More + strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less + than 256 characters. + +These name requirements _only_ apply to the registry API; the registry should +accept a superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors": [{ + "code": <error code>, + "message": <error message>, + "detail": <arbitrary json data> + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification, error codes +will only be added and never removed. + +For a complete account of all error codes, please see the _Detail_ section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/`, will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again.
Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If a `404 Not Found` response status, or another unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fall back to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of an _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: +``` +digest := algorithm ":" hex +algorithm := /[A-Fa-f0-9_+.-]+/ +hex := /[A-Fa-f0-9]+/ +``` + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_, but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring `C` passed into a function, `sha256`, that returns a +bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated +with the hex encoding of `B`. We then define `ID(C)`, the identifier of `C`, +to be equal to `D`. A digest can be verified by independently calculating `D` +and comparing it with the identifier `ID(C)`. + +#### Digest Header + +To provide verification of http content, any response may include a +`Docker-Content-Digest` header. This will include the digest of the target entity +returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security.
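
As an illustration, a client fetching a blob by digest might verify the body along these lines. This is a minimal sketch in Go: the `fetchVerified` helper, the hard-coded sha256 handling and the idea of passing the request digest alongside the URL are assumptions for the example, not part of the specification.

```go
package registryclient

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// fetchVerified downloads content from url and checks it against the digest
// used to request it (e.g. "sha256:6c3c..."). Only sha256 is handled here.
func fetchVerified(url, digest string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	want := strings.TrimPrefix(digest, "sha256:")
	sum := sha256.Sum256(body)
	if got := hex.EncodeToString(sum[:]); got != want {
		// Never trust the transported bytes over the digest used to fetch them.
		return nil, fmt.Errorf("digest mismatch: got sha256:%s, want %s", got, digest)
	}
	return body, nil
}
```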
This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header +> `Docker-Content-Digest` should not be trusted over the "local" digest. + +### Pulling An Image + +An "image" is a combination of a JSON manifest and individual layer files. The +process of pulling an image centers around retrieving these two components. + +The first step in pulling an image is to retrieve the manifest. For reference, +the relevant manifest fields for the registry are the following: + + field | description | +----------|------------------------------------------------| +name | The name of the image. | +tag | The tag for this version of the image. | +fsLayers | A list of layer descriptors (including digest) | +signature | A JWS used to verify the manifest content | + +For more information about the manifest format, please see +[docker/docker#8093](https://github.com/docker/docker/issues/8093). + +When the manifest is in hand, the client must verify the signature to ensure +the names and layers are valid. Once confirmed, the client will then use the +digests to download the individual layers. Layers are stored as blobs in +the V2 registry API, keyed by their digest. + +#### Pulling an Image Manifest + +The image manifest can be fetched with the following url: + +``` +GET /v2/<name>/manifests/<reference> +``` + +The `name` and `reference` parameters identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful, the image +manifest will be returned, with the following format (see docker/docker#8093 +for details): + + { + "name": <name>, + "tag": <tag>, + "fsLayers": [ + { + "blobSum": <digest> + }, + ... + ], + "history": <v1 images>, + "signature": <JWS> + } + +The client should verify the returned manifest signature for authenticity +before fetching layers. + +##### Existing Manifests + +The image manifest can be checked for existence with the following url: + +``` +HEAD /v2/<name>/manifests/<reference> +``` + +The `name` and `reference` parameters identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful the response will +be as follows: + +``` +200 OK +Content-Length: <length of manifest> +Docker-Content-Digest: <digest> +``` + + +#### Pulling a Layer + +Layers are stored in the blob portion of the registry, keyed by digest. +Pulling a layer is carried out by a standard http request. The URL is as +follows: + + GET /v2/<name>/blobs/<digest> + +Access to a layer will be gated by the `name` of the repository but is +identified uniquely in the registry by `digest`. + +This endpoint may issue a 307 (302 for <1.1 clients) redirect to another +service for downloading the layer and clients should be prepared to handle +redirects. + +#### Pushing An Image + +Pushing an image works in the opposite order as a pull: the client must first +push the individual layers, then upload the signed manifest once the layers +are fully pushed into the registry. + +##### Starting An Upload + +To begin the process, a POST request should be issued in the following format: + +``` +POST /v2/<name>/blobs/uploads/ +``` + +The parameters of this request are the image namespace under which the layer +will be linked.
Responses to this request are covered below. + +##### Existing Layers + +The existence of a layer can be checked via a `HEAD` request to the blob store +API. The request should be formatted as follows: + +``` +HEAD /v2/<name>/blobs/<digest> +``` + +If the layer with the digest specified in `digest` is available, a 200 OK +response will be received, with no actual body content (this is according to +the http specification). The response will look as follows: + +``` +200 OK +Content-Length: <length of blob> +Docker-Content-Digest: <digest> +``` + +When this response is received, the client can assume that the layer is +already available in the registry under the given name and should take no +further action to upload the layer. Note that the binary digests may differ +for the existing registry layer, but the digests will be guaranteed to match. + +##### Uploading the Layer + +If the POST request is successful, a `202 Accepted` response will be returned +with the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2/<name>/blobs/uploads/<uuid> +Range: bytes=0-<offset> +Content-Length: 0 +Docker-Upload-UUID: <uuid> +``` + +The rest of the upload process can be carried out with the returned url, +called the "Upload URL" from the `Location` header. All responses to the +upload url, whether sending data or getting status, will be in this format. +Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location` +header is specified, clients should treat it as an opaque url and should never +try to assemble it. While the `uuid` parameter may be an actual UUID, this +proposal imposes no constraints on the format and clients should never impose +any. + +If clients need to correlate local upload state with remote upload state, the +contents of the `Docker-Upload-UUID` header should be used. Such an id can be +used to key the last used location header when implementing resumable uploads. + +##### Upload Progress + +The progress and chunk coordination of the upload process will be coordinated +through the `Range` header. While this is a non-standard use of the `Range` +header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. +For an upload that has just started, for example with a 1000 byte layer file, +the `Range` header would be as follows: + +``` +Range: bytes=0-0 +``` + +To get the status of an upload, issue a GET request to the upload URL: + +``` +GET /v2/<name>/blobs/uploads/<uuid> +Host: <registry host> +``` + +The response will be similar to the above, except it will return a 204 status: + +``` +204 No Content +Location: /v2/<name>/blobs/uploads/<uuid> +Range: bytes=0-<offset> +Docker-Upload-UUID: <uuid> +``` + +Note that the HTTP `Range` header byte ranges are inclusive and that will be +honored, even in non-standard use cases. + +##### Monolithic Upload + +A monolithic upload is simply a chunked upload with a single chunk and may be +favored by clients that would like to avoid the complexity of chunking. To +carry out a "monolithic" upload, one can simply put the entire content blob to +the provided URL: + +``` +PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest> +Content-Length: <size of layer> +Content-Type: application/octet-stream + +<Layer Binary Data> +``` + +The "digest" parameter must be included with the PUT request. Please see the +_Completed Upload_ section for details on the parameters and expected +responses.
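
To make the two-step monolithic flow concrete, here is a rough client sketch in Go. The `pushBlob` helper, its error handling and the registry/name parameters are illustrative assumptions, and it presumes the `Location` header carries an absolute URL (a client should otherwise resolve it against the registry base):

```go
package registryclient

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlob performs a monolithic upload: POST to start the upload, then PUT
// the entire blob to the returned upload URL with the digest parameter set.
func pushBlob(registry, name string, blob []byte) error {
	// Start the upload; the registry answers 202 with the upload URL in Location.
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status starting upload: %s", resp.Status)
	}
	loc := resp.Header.Get("Location") // opaque; do not try to assemble it

	sum := sha256.Sum256(blob)
	digest := fmt.Sprintf("sha256:%x", sum)

	// Append the digest parameter to the (opaque) upload URL.
	u, err := url.Parse(loc)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status completing upload: %s", resp.Status)
	}
	return nil
}
```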
+ +Additionally, the upload can be completed with a single `POST` request to +the uploads endpoint, including the "size" and "digest" parameters: + +``` +POST /v2/<name>/blobs/uploads/?digest=<digest> +Content-Length: <size of layer> +Content-Type: application/octet-stream + +<Layer Binary Data> +``` + +On the registry service, this should allocate an upload, accept and verify +the data and return the same response as the final chunk of an upload. If the +POST request fails collecting the data in any way, the registry should attempt +to return an error response to the client with the `Location` header providing +a place to continue the upload. + +The single `POST` method is provided for convenience and most clients should +implement `POST` + `PUT` to support reliable resume of uploads. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2/<name>/blobs/uploads/<uuid> +Content-Length: <size of chunk> +Content-Range: <start of range>-<end of range> +Content-Type: application/octet-stream + +<Layer Chunk Binary Data> +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2/<name>/blobs/uploads/<uuid> +Range: 0-<last valid range> +Content-Length: 0 +Docker-Upload-UUID: <uuid> +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2/<name>/blobs/uploads/<uuid> +Range: bytes=0-<offset> +Content-Length: 0 +Docker-Upload-UUID: <uuid> +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest> +Content-Length: <size of chunk> +Content-Range: <start of range>-<end of range> +Content-Type: application/octet-stream + +<Last Layer Chunk Binary Data> +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validate +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2/<name>/blobs/<digest> +Content-Length: 0 +Docker-Content-Digest: <digest> +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob, which may differ from the provided digest. Most clients may +ignore the value, but if it is used, the client should verify the value against +the uploaded blob data. + +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer.
For example, an HTTP URI parameter +might be as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +match this digest. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. +The format will be as follows: + +``` +DELETE /v2/<name>/blobs/uploads/<uuid> +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Cross Repository Blob Mount + +A blob may be mounted from another repository that the client has read access +to, removing the need to upload a blob already known to the registry. To issue +a blob mount instead of an upload, a POST request should be issued in the +following format: + +``` +POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name> +Content-Length: 0 +``` + +If the blob is successfully mounted, the client will receive a `201 Created` +response: + +``` +201 Created +Location: /v2/<name>/blobs/<digest> +Content-Length: 0 +Docker-Content-Digest: <digest> +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob, which may differ from the provided digest. Most clients may +ignore the value, but if it is used, the client should verify the value against +the uploaded blob data. + +If a mount fails due to invalid repository or digest arguments, the registry +will fall back to the standard upload behavior and return a `202 Accepted` with +the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2/<name>/blobs/uploads/<uuid> +Range: bytes=0-<offset> +Content-Length: 0 +Docker-Upload-UUID: <uuid> +``` + +This behavior is consistent with older versions of the registry, which do not +recognize the repository mount query parameters. + +Note: a client may issue a HEAD request to check existence of a blob in a source +repository to distinguish between the registry not supporting blob mounts and +the blob not existing in the expected repository. + +##### Errors + +If a 502, 503 or 504 error is received, the client should assume the failure +is due to a temporary condition and retry, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2/<name>/blobs/<digest> + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete image will not be resolvable.
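
As an example of the delete flow just described, a client might handle the two expected outcomes along these lines. This is a sketch: the `deleteBlob` helper and its treatment of other statuses are assumptions for illustration.

```go
package registryclient

import (
	"fmt"
	"net/http"
)

// deleteBlob removes a layer by digest. A 404 is treated as success, since it
// means the blob was already deleted or never existed.
func deleteBlob(registry, name, digest string) error {
	req, err := http.NewRequest("DELETE", registry+"/v2/"+name+"/blobs/"+digest, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusAccepted: // 202: deleted
		return nil
	case http.StatusNotFound: // 404: already gone
		return nil
	default:
		return fmt.Errorf("unexpected status deleting blob: %s", resp.Status)
	}
}
```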
+ +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2/<name>/manifests/<reference> + + { + "name": <name>, + "tag": <tag>, + "fsLayers": [ + { + "blobSum": <digest> + }, + ... + ], + "history": <v1 images>, + "signature": <JWS>, + ... + } + +The `name` and `reference` fields of the request body must match those specified in +the URL. The `reference` field may be a "tag" or a "digest". + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the _PUT Manifest_ section +for details on possible error codes that may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. The `detail` field of the error response will have a `digest` field +identifying the missing blob. An error is returned for each unknown blob. The +response format is as follows: + + { + "errors": [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": <missing layer digest> + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections known as _repositories_, each keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + <name>, + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the catalog can assert that a repository is present, but its absence proves +nothing. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. A registry +may also limit the number of results returned even if pagination was not +explicitly requested. In this case the `Link` header will be returned along +with the results, and subsequent results can be obtained by following the link +as if pagination had been initially requested. + +For details of the `Link` header, please see the _Pagination_ section. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n=<integer> +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next" + +{ + "repositories": [ + <name>, + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set.
To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been received. + +> __NOTE:__ In the request template above, note that the angle brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=<n from the request>&last=<last repository value from previous response> +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <<url>?n=2&last=b>; rel="next" +``` + +The client can then issue the request with the above value from the `Link` header, +receiving the values _c_ and _d_. Note that `n` may change on the second-to-last +response or be omitted entirely, if the server so chooses. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2/<name>/tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": <name>, + "tags": [ + <tag>, + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2/<name>/tags/list?n=<integer> +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <<url>?n=<n from the request>&last=<last tag value from response>>; rel="next" + +{ + "name": <name>, + "tags": [ + <tag>, + ...
+ ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. + +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | +| GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | +| GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | +| GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | +| POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | +| GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | +| PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. 
| +| PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | +| DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | +| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | + + +The detail for each endpoint is covered in the following sections. + +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| + `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. + `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. + `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. + `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. + `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. + `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. + `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. + `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. + `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. + `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. + `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. + `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. + `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. + `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. + + + +### Base + +Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication. 
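
Before the per-endpoint detail, here is a small Go sketch of such a version check against `/v2/`. The `supportsV2` helper and its base-URL parameter are illustrative assumptions; the status handling follows the _API Version Check_ section above.

```go
package registryclient

import (
	"fmt"
	"net/http"
)

// supportsV2 probes the base endpoint and reports whether the registry
// implements the V2 API: 200 or 401 indicate V2 support; anything else means
// the client should assume the V2 API is absent. The
// Docker-Distribution-API-Version header is checked when present.
func supportsV2(base string) (bool, error) {
	resp, err := http.Get(base + "/v2/")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK, http.StatusUnauthorized:
		if v := resp.Header.Get("Docker-Distribution-API-Version"); v != "" && v != "registry/2.0" {
			return false, fmt.Errorf("unexpected API version header: %q", v)
		}
		return true, nil
	default:
		return false, nil
	}
}
```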
+ + + +#### GET Base + +Check that the endpoint implements Docker Registry API V2. + + + +``` +GET /v2/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| + + + + +###### On Success: OK + +``` +200 OK +``` + +The API implements V2 protocol and is accessible. + + + + +###### On Failure: Not Found + +``` +404 Not Found +``` + +The registry does not implement the V2 API. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + + + +### Tags + +Retrieve information about tags. + + + +#### GET Tags + +Fetch the tags under the repository identified by `name`. + + +##### Tags + +``` +GET /v2//tags/list +Host: +Authorization: +``` + +Return all tags for the repository + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... 
+ ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + + +### Manifest + +Create, update, delete and retrieve manifests. + + + +#### GET Manifest + +Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + + +``` +GET /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: OK + +``` +200 OK +Docker-Content-Digest: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The name or reference was invalid. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + +#### PUT Manifest + +Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +PUT /v2//manifests/ +Host: +Authorization: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Content-Digest: +``` + +The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location url of the uploaded manifest.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Manifest + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... 
+
+#### PUT Manifest
+
+Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.
+
+```
+PUT /v2/<name>/manifests/<reference>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Type: application/json; charset=utf-8
+
+{
+    "name": <name>,
+    "tag": <tag>,
+    "fsLayers": [
+        {
+            "blobSum": "<digest>"
+        },
+        ...
+    ],
+    "history": <v1 images>,
+    "signature": <JWS>
+}
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`reference`|path|Tag or digest of the target manifest.|
+
+###### On Success: Created
+
+```
+201 Created
+Location: <url>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The canonical location url of the uploaded manifest.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Failure: Invalid Manifest
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
+| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
+| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+
+###### On Failure: Missing Layer(s)
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": "BLOB_UNKNOWN",
+            "message": "blob unknown to registry",
+            "detail": {
+                "digest": "<digest>"
+            }
+        },
+        ...
+    ]
+}
+```
+
+One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Not allowed
+
+```
+405 Method Not Allowed
+```
+
+Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
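+
+As a non-normative illustration, a Go sketch of the manifest push above follows. The manifest body and repository are hypothetical placeholders; a real client serializes the full schema shown earlier and handles the error responses enumerated above.
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical, abbreviated manifest body.
+	manifest := []byte(`{"name": "library/hello", "tag": "latest", "fsLayers": []}`)
+
+	url := "https://registry.example.com/v2/library/hello/manifests/latest"
+	req, err := http.NewRequest("PUT", url, bytes.NewReader(manifest))
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Content-Type", "application/json; charset=utf-8")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		// 201: the canonical location and digest are returned in headers.
+		fmt.Println("stored at:", resp.Header.Get("Location"))
+		fmt.Println("digest:", resp.Header.Get("Docker-Content-Digest"))
+	case http.StatusBadRequest:
+		fmt.Println("manifest rejected; inspect the errors array in the body")
+	default:
+		fmt.Println("unexpected status:", resp.Status)
+	}
+}
+```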
+
+#### DELETE Manifest
+
+Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.
+
+```
+DELETE /v2/<name>/manifests/<reference>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`reference`|path|Tag or digest of the target manifest.|
+
+###### On Success: Accepted
+
+```
+202 Accepted
+```
+
+###### On Failure: Invalid Name or Reference
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The specified `name` or `reference` was invalid and the delete was unable to proceed.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+
+###### On Failure: Unknown Manifest
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The specified `name` or `reference` is unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
+
+###### On Failure: Not allowed
+
+```
+405 Method Not Allowed
+```
+
+Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+
+### Blob
+
+Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.
+
+#### GET Blob
+
+Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.
+
+##### Fetch Blob
+
+```
+GET /v2/<name>/blobs/<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`digest`|path|Digest of desired blob.|
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Docker-Content-Digest: <digest>
+Content-Type: application/octet-stream
+
+<blob binary data>
+```
+
+The blob identified by `digest` is available. The blob content will be present in the body of the response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The length of the requested blob content.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Success: Temporary Redirect
+
+```
+307 Temporary Redirect
+Location: <blob location>
+Docker-Content-Digest: <digest>
+```
+
+The blob identified by `digest` is available at the provided location.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location where the layer should be accessible.|
+|`Docker-Content-Digest`|Digest of the targeted content for the request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `digest`.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The blob, identified by `name` and `digest`, is unknown to the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
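+
+As a non-normative illustration, a Go sketch of the blob fetch above follows, verifying the returned content against the requested digest. The repository and digest are hypothetical; Go's default HTTP client follows the 307 redirect described above automatically.
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical repository and digest.
+	digest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+	url := "https://registry.example.com/v2/library/hello/blobs/" + digest
+
+	resp, err := http.Get(url) // a 307 redirect to a blob store is followed automatically
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		fmt.Println("unexpected status:", resp.Status)
+		return
+	}
+
+	// Hash the body so the content can be checked against the requested digest.
+	h := sha256.New()
+	n, err := io.Copy(h, resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("read %d bytes, got sha256:%x (want %s)\n", n, h.Sum(nil), digest)
+}
+```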
+
+##### Fetch Blob Part
+
+```
+GET /v2/<name>/blobs/<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+Range: bytes=<start>-<end>
+```
+
+This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Range`|header|HTTP Range header specifying blob chunk.|
+|`name`|path|Name of the target repository.|
+|`digest`|path|Digest of desired blob.|
+
+###### On Success: Partial Content
+
+```
+206 Partial Content
+Content-Length: <length>
+Content-Range: bytes <start>-<end>/<size>
+Content-Type: application/octet-stream
+
+<blob chunk binary data>
+```
+
+The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The length of the requested blob chunk.|
+|`Content-Range`|Content range of blob chunk.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `digest`.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The blob, identified by `name` and `digest`, is unknown to the registry.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
+
+###### On Failure: Requested Range Not Satisfiable
+
+```
+416 Requested Range Not Satisfiable
+```
+
+The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + +#### DELETE Blob + +Delete the blob identified by `name` and `digest` + + + +``` +DELETE /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Docker-Content-Digest: +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|0| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Method Not Allowed + +``` +405 Method Not Allowed +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... 
+
+Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+
+
+### Initiate Blob Upload
+
+Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.
+
+#### POST Initiate Blob Upload
+
+Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.
+
+##### Initiate Monolithic Blob Upload
+
+```
+POST /v2/<name>/blobs/uploads/?digest=<digest>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: <length of blob>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.
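+
+As a non-normative illustration, a Go sketch of such a single-request upload follows. The blob content and repository are hypothetical; the digest is computed from the body so the registry can verify it.
+
+```
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	blob := []byte("hypothetical layer content")
+	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
+
+	// Single-request upload: the digest is passed as a query parameter.
+	url := "https://registry.example.com/v2/library/hello/blobs/uploads/?digest=" + digest
+	req, err := http.NewRequest("POST", url, bytes.NewReader(blob))
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Content-Type", "application/octet-stream")
+	req.ContentLength = int64(len(blob))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusCreated {
+		fmt.Println("blob available at:", resp.Header.Get("Location"))
+	} else {
+		fmt.Println("upload failed:", resp.Status)
+	}
+}
+```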
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|Length of the blob being uploaded, corresponding to the length of the request body.|
+|`name`|path|Name of the target repository.|
+|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.|
+
+###### On Success: Created
+
+```
+201 Created
+Location: <blob location>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The blob has been created in the registry and is available at the provided location.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The canonical location of the created blob.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Not allowed
+
+```
+405 Method Not Allowed
+```
+
+Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+
+##### Initiate Resumable Blob Upload
+
+```
+POST /v2/<name>/blobs/uploads/
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: 0
+```
+
+Initiate a resumable blob upload with an empty request body.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
+|`name`|path|Name of the target repository.|
+
+###### On Success: Accepted
+
+```
+202 Accepted
+Content-Length: 0
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-0
+Docker-Upload-UUID: <uuid>
+```
+
+The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
+
+##### Mount Blob
+
+```
+POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Length: 0
+```
+
+Mount a blob identified by the `mount` parameter from another repository.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
+|`name`|path|Name of the target repository.|
+|`mount`|query|Digest of blob to mount from the source repository.|
+|`from`|query|Name of the source repository.|
+
+###### On Success: Created
+
+```
+201 Created
+Location: <blob location>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The blob has been mounted in the repository and is available at the provided location.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The canonical location of the mounted blob.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Invalid Name or Digest
+
+```
+400 Bad Request
+```
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+
+###### On Failure: Not allowed
+
+```
+405 Method Not Allowed
+```
+
+Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
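+
+As a non-normative illustration, a Go sketch of the cross-repository mount above follows. The digest and repository names are hypothetical; whether a registry that cannot mount falls back to opening a normal upload session is an assumption here, so the sketch also handles a `202`.
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Hypothetical digest and repositories.
+	digest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+	u := "https://registry.example.com/v2/library/target/blobs/uploads/?mount=" +
+		url.QueryEscape(digest) + "&from=" + url.QueryEscape("library/source")
+
+	resp, err := http.Post(u, "", nil) // empty body, Content-Length: 0
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		fmt.Println("mounted at:", resp.Header.Get("Location")) // no data transferred
+	case http.StatusAccepted:
+		fmt.Println("mount unavailable, upload session started:", resp.Header.Get("Location"))
+	default:
+		fmt.Println("unexpected status:", resp.Status)
+	}
+}
+```
+
+A successful mount avoids re-transferring layer data the registry already stores.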
+
+
+### Blob Upload
+
+Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only use it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload-related API calls.
+
+#### GET Blob Upload
+
+Retrieve the status of the upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+```
+
+Retrieve the progress of the current upload, as reported by the `Range` header.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Upload Progress
+
+```
+204 No Content
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The upload is known and in progress. The last received offset is available in the `Range` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
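+
+As a non-normative illustration, a Go sketch of the status check above follows. The upload URL is a hypothetical stand-in for the value a client would have received in a `Location` header.
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical upload URL, as previously returned in a Location header.
+	uploadURL := "https://registry.example.com/v2/library/hello/blobs/uploads/some-uuid"
+
+	resp, err := http.Get(uploadURL)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusNoContent {
+		fmt.Println("unexpected status:", resp.Status)
+		return
+	}
+	// Range has the form "0-<offset>"; the next chunk starts at offset+1.
+	fmt.Println("upload progress:", resp.Header.Get("Range"))
+	fmt.Println("upload uuid:", resp.Header.Get("Docker-Upload-UUID"))
+}
+```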
+
+#### PATCH Blob Upload
+
+Upload a chunk of data for the specified upload.
+
+##### Stream upload
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+Authorization: <scheme> <token>
+Content-Type: application/octet-stream
+
+<binary data>
+```
+
+Upload a stream of data to the specified upload without completing it.
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
+|`Authorization`|header|An RFC7235 compliant authorization header.|
+|`name`|path|Name of the target repository.|
+|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
+
+###### On Success: Data Accepted
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The stream of data has been accepted and the current progress is available in the `Range` header. The updated upload location is available in the `Location` header.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
+|`Range`|Range indicating the current progress of the upload.|
+|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
+|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
+
+###### On Failure: Bad Request
+
+```
+400 Bad Request
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
| + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +##### Chunked upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Range: - +Content-Length: +Content-Type: application/octet-stream + + +``` + +Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Chunk Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... 
+
+There was an error processing the upload and it must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
+| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
+| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
+
+###### On Failure: Not Found
+
+```
+404 Not Found
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The upload is unknown to the registry. The upload must be restarted.
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
+
+###### On Failure: Requested Range Not Satisfiable
+
+```
+416 Requested Range Not Satisfiable
+```
+
+The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.
+
+###### On Failure: Authentication Required
+
+```
+401 Unauthorized
+WWW-Authenticate: <scheme> realm="<realm>", ...
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client is not authenticated.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a WWW-Authenticate HTTP response header indicating how to authenticate. |
+
+###### On Failure: No Such Repository Error
+
+```
+404 Not Found
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The repository is not known to the registry.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
+
+###### On Failure: Access Denied
+
+```
+403 Forbidden
+Content-Length: <length>
+Content-Type: application/json; charset=utf-8
+
+{
+    "errors": [
+        {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}
+```
+
+The client does not have required access to the repository.
+
+The following headers will be returned on the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+The error codes that may be included in the response body are enumerated below:
+
+|Code|Message|Description|
+|----|-------|-----------|
+| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
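+
+As a non-normative illustration, a Go sketch tying the chunked `PATCH` requests above to the completing `PUT` described next follows. The blob, upload URL, and chunking are hypothetical; note that a real client must merge the `digest` parameter with any query parameters already present in the returned `Location` URL rather than appending it blindly as done here.
+
+```
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"net/http"
+)
+
+// patchChunk uploads one chunk to the current upload URL and returns the
+// updated Location to use for the next request.
+func patchChunk(uploadURL string, start int, chunk []byte) (string, error) {
+	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(chunk))
+	if err != nil {
+		return "", err
+	}
+	end := start + len(chunk) - 1
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
+	req.Header.Set("Content-Type", "application/octet-stream")
+	req.ContentLength = int64(len(chunk))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusNoContent {
+		return "", fmt.Errorf("chunk rejected: %s", resp.Status)
+	}
+	return resp.Header.Get("Location"), nil
+}
+
+func main() {
+	blob := []byte("hypothetical layer content, uploaded in two chunks")
+	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
+
+	// Hypothetical upload URL obtained from a prior POST.
+	uploadURL := "https://registry.example.com/v2/library/hello/blobs/uploads/some-uuid"
+
+	half := len(blob) / 2
+	uploadURL, err := patchChunk(uploadURL, 0, blob[:half])
+	if err != nil {
+		panic(err)
+	}
+	uploadURL, err = patchChunk(uploadURL, half, blob[half:])
+	if err != nil {
+		panic(err)
+	}
+
+	// Complete the upload with an empty body and the digest as a query parameter.
+	req, err := http.NewRequest("PUT", uploadURL+"?digest="+digest, nil)
+	if err != nil {
+		panic(err)
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println("completion status:", resp.Status)
+}
+```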
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + +#### PUT Blob Upload + +Complete the upload specified by `uuid`, optionally appending the body as the final chunk. + + + +``` +PUT /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octet-stream + + +``` + +Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| +|`digest`|query|Digest of uploaded blob.| + + + + +###### On Success: Upload Complete + +``` +204 No Content +Location: +Content-Range: - +Content-Length: 0 +Docker-Content-Digest: +``` + +The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location of the blob for retrieval| +|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. 
| + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + +#### DELETE Blob Upload + +Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. + + + +``` +DELETE /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Cancel the upload specified by `uuid`. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Deleted + +``` +204 No Content +Content-Length: 0 +``` + +The upload has been successfully deleted. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +An error was encountered processing the delete. The client may ignore this error. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + + + +### Catalog + +List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. + + + +#### GET Catalog + +Retrieve a sorted, json list of repositories available in the registry. + + +##### Catalog Fetch Complete + +``` +GET /v2/_catalog +``` + +Request an unabridged list of repositories available. + + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] +} +``` + +Returns the unabridged list of repositories as a json response. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +##### Catalog Fetch Paginated + +``` +GET /v2/_catalog?n=&last= +``` + +Return the specified portion of repositories. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +} +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl new file mode 100644 index 00000000..0419666b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl @@ -0,0 +1,1176 @@ + + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. 
The specification covers the operation of version 2
+of this API, known as _Docker Registry HTTP API V2_.
+
+While the V1 registry protocol is usable, there are several problems with the
+architecture that have led to this new version. The main driver of this
+specification is a set of changes to the docker image format, covered in
+[docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image
+definition and improves security. This specification will build on that work,
+leveraging new properties of the manifest format to improve performance,
+reduce bandwidth usage and decrease the likelihood of backend corruption.
+
+For relevant details and history leading up to this specification, please see
+the following issues:
+
+- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
+- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
+- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
+
+### Scope
+
+This specification covers the URL layout and protocols of the interaction
+between docker registry and docker core. This will affect the docker core
+registry API and the rewrite of docker-registry. Docker registry
+implementations may implement other API endpoints, but they are not covered by
+this specification.
+
+This includes the following features:
+
+- Namespace-oriented URI Layout
+- PUSH/PULL registry server for V2 image manifest format
+- Resumable layer PUSH support
+- V2 Client library implementation
+
+While authentication and authorization support will influence this
+specification, details of the protocol will be left to a future specification.
+Relevant header definitions and error codes are present to provide an
+indication of what a client may encounter.
+
+#### Future
+
+There are features that have been discussed during the process of cutting this
+specification. The following is an incomplete list:
+
+- Immutable image references
+- Multiple architecture support
+- Migration from v2compatibility representation
+
+These may represent features that are either out of the scope of this
+specification, the purview of another specification or have been deferred to a
+future version.
+
+### Use Cases
+
+For the most part, the use cases of the former registry API apply to the new
+version. Differentiating use cases are covered below.
+
+#### Image Verification
+
+A docker engine instance would like to run a verified image named
+"library/ubuntu", with the tag "latest". The engine contacts the registry,
+requesting the manifest for "library/ubuntu:latest". An untrusted registry
+returns a manifest. Before proceeding to download the individual layers, the
+engine verifies the manifest's signature, ensuring that the content was
+produced from a trusted source and no tampering has occurred. After each layer
+is downloaded, the engine verifies the digest of the layer, ensuring that the
+content matches that specified by the manifest.
+
+#### Resumable Push
+
+Company X's build servers lose connectivity to the docker registry before
+completing an image layer transfer. After connectivity returns, the build
+server attempts to re-upload the image. The registry notifies the build server
+that the upload has already been partially attempted. The build server
+responds by only sending the remaining data to complete the image file.
+
+#### Resumable Pull
+
+Company X is having more connectivity problems, but this time in their
+deployment datacenter.
When downloading an image, the connection is
+interrupted before completion. The client keeps the partial data and uses http
+`Range` requests to avoid downloading repeated data.
+
+#### Layer Upload De-duplication
+
+Company Y's build system creates two identical docker layers from build
+processes A and B. Build process A completes uploading the layer before B.
+When process B attempts to upload the layer, the registry indicates that it is
+not necessary because the layer is already known.
+
+If processes A and B upload the same layer at the same time, both operations
+will proceed and the first to complete will be stored in the registry (Note:
+we may modify this to prevent dogpiling with some locking mechanism).
+
+### Changes
+
+The V2 specification has been written to work as a living document, specifying
+only what is certain and leaving what is not specified open or to future
+changes. Only non-conflicting additions should be made to the API and accepted
+changes should avoid preventing future changes from happening.
+
+This section should be updated when changes are made to the specification,
+indicating what is different. Optionally, we may start marking parts of the
+specification to correspond with the versions enumerated here.
+
+Each set of changes is given a letter corresponding to a set of modifications
+that were applied to the baseline specification. These are merely for
+reference and shouldn't be used outside the specification other than to
+identify a set of modifications.
+
+- **f**
+  - Specify the delete API for layers and manifests.
+
+- **e**
+  - Added support for listing registry contents.
+  - Added pagination to tags API.
+  - Added common approach to support pagination.
+
+- **d**
+  - Allow repository name components to be one character.
+  - Clarified that single component names are allowed.
+
+- **c**
+  - Added section covering digest format.
+  - Added more clarification that manifest cannot be deleted by tag.
+
+- **b**
+  - Added capability of doing streaming upload to PATCH blob upload.
+  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
+  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
+
+- **a**
+  - Added support for immutable manifest references in manifest endpoints.
+  - Deleting a manifest by tag has been deprecated.
+  - Specified `Docker-Content-Digest` header for appropriate entities.
+  - Added error code for unsupported operations.
+
+## Overview
+
+This section covers client flows and details of the API endpoints. The URI
+layout of the new API is structured to support a rich authentication and
+authorization model by leveraging namespaces. All endpoints will be prefixed
+by the API version and the repository name:
+
+    /v2/<name>/
+
+For example, for an API endpoint that works with the `library/ubuntu`
+repository, the URI prefix will be:
+
+    /v2/library/ubuntu/
+
+This scheme provides rich access control over various operations and methods
+using the URI prefix and http methods that can be controlled in a variety of
+ways.
+
+Classically, repository names have always been two path components where each
+path component is less than 30 characters. The V2 registry API does not
+enforce this. The rules for a repository name are as follows:
+
+1. A repository name is broken up into _path components_. A component of a
+   repository name must consist of at least one lowercase alphanumeric
+   character, optionally separated by periods, dashes or underscores. More
+   strictly, it must match the regular expression
+   `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
+2. If a repository name has two or more path components, they must be
+   separated by a forward slash ("/").
+3. The total length of a repository name, including slashes, must be less
+   than 256 characters.
+
+These name requirements _only_ apply to the registry API and should accept a
+superset of what is supported by other docker ecosystem components.
+
+All endpoints should support aggressive http caching, compression and range
+headers, where appropriate. The new API attempts to leverage HTTP semantics
+where possible but may break from standards to implement targeted features.
+
+For detail on individual endpoints, please see the [_Detail_](#detail)
+section.
+
+### Errors
+
+Actionable failure conditions, covered in detail in their relevant sections,
+are reported as part of 4xx responses, in a json response body. One or more
+errors will be returned in the following format:
+
+    {
+        "errors": [{
+                "code": <error code>,
+                "message": <error message>,
+                "detail": <arbitrary data>
+            },
+            ...
+        ]
+    }
+
+The `code` field will be a unique identifier, all caps with underscores by
+convention. The `message` field will be a human readable string. The optional
+`detail` field may contain arbitrary json data providing information the
+client can use to resolve the issue.
+
+While the client can take action on certain error codes, the registry may add
+new error codes over time. All client implementations should treat unknown
+error codes as `UNKNOWN`, allowing future error codes to be added without
+breaking API compatibility. For the purposes of the specification error codes
+will only be added and never removed.
+
+For a complete account of all error codes, please see the _Detail_ section.
+
+### API Version Check
+
+A minimal endpoint, mounted at `/v2/`, will provide version support
+information based on its response statuses. The request format is as follows:
+
+    GET /v2/
+
+If a `200 OK` response is returned, the registry implements the V2(.1)
+registry API and the client may proceed safely with other V2 operations.
+Optionally, the response may contain information about the supported paths in
+the response body. The client should be prepared to ignore this data.
+
+If a `401 Unauthorized` response is returned, the client should take action
+based on the contents of the "WWW-Authenticate" header and try the endpoint
+again. Depending on access control setup, the client may still have to
+authenticate against different resources, even if this check succeeds.
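+
+As an illustration only (not part of the specification), a minimal Go sketch
+of this version check might look as follows; the registry address
+`registry.example.com` is a placeholder:
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical registry address; substitute a real endpoint.
+	resp, err := http.Get("https://registry.example.com/v2/")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// "registry/2.0" indicates that the endpoint serves this API.
+	apiVersion := resp.Header.Get("Docker-Distribution-API-Version")
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		fmt.Println("V2 supported, version header:", apiVersion)
+	case http.StatusUnauthorized:
+		// Act on the challenge, then retry with credentials.
+		fmt.Println("authenticate first:", resp.Header.Get("WWW-Authenticate"))
+	default:
+		// 404 or anything unexpected: assume V2 is not implemented.
+		fmt.Println("registry does not appear to implement V2")
+	}
+}
+```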
+
+If a `404 Not Found` response status, or other unexpected status, is returned,
+the client should proceed with the assumption that the registry does not
+implement V2 of the API.
+
+When a `200 OK` or `401 Unauthorized` response is returned, the
+"Docker-Distribution-API-Version" header should be set to "registry/2.0".
+Clients may require this header value to determine if the endpoint serves this
+API. When this header is omitted, clients may fall back to an older API
+version.
+
+### Content Digests
+
+This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage).
+The core of this design is the concept of a content addressable identifier. It
+uniquely identifies content by taking a collision-resistant hash of the bytes.
+Such an identifier can be independently calculated and verified by selection
+of a common _algorithm_. If such an identifier can be communicated in a secure
+manner, one can retrieve the content from an insecure source, calculate it
+independently and be certain that the correct content was obtained. Put simply,
+the identifier is a property of the content.
+
+To disambiguate from other concepts, we call this identifier a _digest_. A
+_digest_ is a serialized hash result, consisting of an _algorithm_ and _hex_
+portion. The _algorithm_ identifies the methodology used to calculate the
+digest. The _hex_ portion is the hex-encoded result of the hash.
+
+We define a _digest_ string to match the following grammar:
+```
+digest    := algorithm ":" hex
+algorithm := /[A-Fa-f0-9_+.-]+/
+hex       := /[A-Fa-f0-9]+/
+```
+
+Some examples of _digests_ include the following:
+
+|digest|description|
+|------|-----------|
+|`sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b`|Common sha256 based digest|
+
+While the _algorithm_ does allow one to implement a wide variety of
+algorithms, compliant implementations should use sha256. Heavy processing of
+input before calculating a hash is discouraged to avoid degrading the
+uniqueness of the _digest_, but some canonicalization may be performed to
+ensure consistent identifiers.
+
+Let's use a simple example in pseudo-code to demonstrate a digest calculation:
+```
+let C = 'a small string'
+let B = sha256(C)
+let D = 'sha256:' + EncodeHex(B)
+let ID(C) = D
+```
+
+Above, we have bytestring `C` passed into a function, `sha256`, that returns a
+bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated
+with the hex encoding of `B`. We then define the identifier of `C`, `ID(C)`,
+as equal to `D`. A digest can be verified by independently calculating `D` and
+comparing it with identifier `ID(C)`.
+
+#### Digest Header
+
+To provide verification of http content, any response may include a
+`Docker-Content-Digest` header. This will include the digest of the target
+entity returned in the response. For blobs, this is the entire blob content. For
+manifests, this is the manifest body without the signature content, also known
+as the JWS payload. Note that the commonly used canonicalization for digest
+calculation may be dependent on the mediatype of the content, such as with
+manifests.
+
+The client may choose to ignore the header or may verify it to ensure content
+integrity and transport security. This is most important when fetching by a
+digest. To ensure security, the content should be verified against the digest
+used to fetch the content. At times, the returned digest may differ from that
+used to initiate a request. Such digests are considered to be from different
+_domains_, meaning they have different values for _algorithm_. In such a case,
+the client may choose to verify the digests in both domains or ignore the
+server's digest. To maintain security, the client _must_ always verify the
+content against the _digest_ used to fetch the content.
+
+> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
+> the same digest used to fetch the content to verify it. The header
+> `Docker-Content-Digest` should not be trusted over the "local" digest.
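+
+As a concrete illustration of verifying content against the digest used to
+fetch it, here is a small Go sketch (illustrative only; it assumes the blob
+bytes are already in memory and only handles the sha256 algorithm):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// verify checks content against a digest of the form "sha256:<hex>".
+func verify(content []byte, digest string) bool {
+	parts := strings.SplitN(digest, ":", 2)
+	if len(parts) != 2 || parts[0] != "sha256" {
+		return false // other algorithms are out of scope for this sketch
+	}
+	sum := sha256.Sum256(content)
+	return hex.EncodeToString(sum[:]) == strings.ToLower(parts[1])
+}
+
+func main() {
+	content := []byte("a small string")
+	sum := sha256.Sum256(content)
+	digest := "sha256:" + hex.EncodeToString(sum[:])
+	fmt.Println(verify(content, digest)) // prints: true
+}
+```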
+
+### Pulling An Image
+
+An "image" is a combination of a JSON manifest and individual layer files. The
+process of pulling an image centers around retrieving these two components.
+
+The first step in pulling an image is to retrieve the manifest. For reference,
+the relevant manifest fields for the registry are the following:
+
+ field     | description                                    |
+-----------|------------------------------------------------|
+ name      | The name of the image.                         |
+ tag       | The tag for this version of the image.         |
+ fsLayers  | A list of layer descriptors (including digest) |
+ signature | A JWS used to verify the manifest content      |
+
+For more information about the manifest format, please see
+[docker/docker#8093](https://github.com/docker/docker/issues/8093).
+
+When the manifest is in hand, the client must verify the signature to ensure
+the names and layers are valid. Once confirmed, the client will then use the
+digests to download the individual layers. Layers are stored as blobs in
+the V2 registry API, keyed by their digest.
+
+#### Pulling an Image Manifest
+
+The image manifest can be fetched with the following url:
+
+```
+GET /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameters identify the image and are required. The
+reference may include a tag or digest.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful, the image
+manifest will be returned, with the following format (see docker/docker#8093
+for details):
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <digest>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>
+    }
+
+The client should verify the returned manifest signature for authenticity
+before fetching layers.
+
+##### Existing Manifests
+
+The image manifest can be checked for existence with the following url:
+
+```
+HEAD /v2/<name>/manifests/<reference>
+```
+
+The `name` and `reference` parameters identify the image and are required. The
+reference may include a tag or digest.
+
+A `404 Not Found` response will be returned if the image is unknown to the
+registry. If the image exists and the response is successful the response will
+be as follows:
+
+```
+200 OK
+Content-Length: <length of manifest>
+Docker-Content-Digest: <digest>
+```
+
+#### Pulling a Layer
+
+Layers are stored in the blob portion of the registry, keyed by digest.
+Pulling a layer is carried out by a standard http request. The URL is as
+follows:
+
+    GET /v2/<name>/blobs/<digest>
+
+Access to a layer will be gated by the `name` of the repository but is
+identified uniquely in the registry by `digest`.
+
+This endpoint may issue a 307 (302 for clients older than HTTP/1.1) redirect
+to another service for downloading the layer and clients should be prepared to
+handle redirects.
+
+### Pushing An Image
+
+Pushing an image works in the opposite order as a pull. After assembling the
+image manifest, the client must first push the individual layers. When the
+layers are fully pushed into the registry, the client should upload the signed
+manifest.
+
+#### Pushing a Layer
+
+All layer uploads use two steps to manage the upload process. The first step
+starts the upload in the registry service, returning a url to carry out the
+second step. The second step uses the upload url to transfer the actual data.
+Uploads are started with a POST request which returns a url that can be used
+to push data and check upload status. To begin the process, a POST request
+should be issued in the following format:
+
+```
+POST /v2/<name>/blobs/uploads/
+```
+
+The parameters of this request are the image namespace under which the layer
+will be linked. Responses to this request are covered below.
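+
+For illustration, a minimal Go sketch that starts an upload and captures the
+opaque upload URL might look like this (the registry host and repository name
+are placeholders):
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical repository on a hypothetical registry.
+	url := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/"
+	resp, err := http.Post(url, "", nil)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusAccepted {
+		fmt.Println("unexpected status:", resp.Status)
+		return
+	}
+	// The opaque upload URL; all subsequent upload requests go here.
+	fmt.Println("upload URL:", resp.Header.Get("Location"))
+	// Use this id to correlate local upload state with the registry.
+	fmt.Println("upload uuid:", resp.Header.Get("Docker-Upload-UUID"))
+}
+```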
+
+##### Existing Layers
+
+The existence of a layer can be checked via a `HEAD` request to the blob store
+API. The request should be formatted as follows:
+
+```
+HEAD /v2/<name>/blobs/<digest>
+```
+
+If the layer with the digest specified in `digest` is available, a 200 OK
+response will be received, with no actual body content (this is according to
+http specification). The response will look as follows:
+
+```
+200 OK
+Content-Length: <length of blob>
+Docker-Content-Digest: <digest>
+```
+
+When this response is received, the client can assume that the layer is
+already available in the registry under the given name and should take no
+further action to upload the layer. Note that the binary data stored for the
+existing registry layer may differ, but the digests will be guaranteed to
+match.
+
+##### Uploading the Layer
+
+If the POST request is successful, a `202 Accepted` response will be returned
+with the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+The rest of the upload process can be carried out with the returned url,
+called the "Upload URL" from the `Location` header. All responses to the
+upload url, whether sending data or getting status, will be in this format.
+Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
+header is specified, clients should treat it as an opaque url and should never
+try to assemble it. While the `uuid` parameter may be an actual UUID, this
+proposal imposes no constraints on the format and clients should never impose
+any.
+
+If clients need to correlate local upload state with remote upload state, the
+contents of the `Docker-Upload-UUID` header should be used. Such an id can be
+used to key the last used location header when implementing resumable uploads.
+
+##### Upload Progress
+
+The progress and chunk coordination of the upload process will be coordinated
+through the `Range` header. While this is a non-standard use of the `Range`
+header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
+For an upload that has just started, for example with a 1000 byte layer file,
+the `Range` header would be as follows:
+
+```
+Range: bytes=0-0
+```
+
+To get the status of an upload, issue a GET request to the upload URL:
+
+```
+GET /v2/<name>/blobs/uploads/<uuid>
+Host: <registry host>
+```
+
+The response will be similar to the above, except it will return a 204 status:
+
+```
+204 No Content
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Docker-Upload-UUID: <uuid>
+```
+
+Note that the HTTP `Range` header byte ranges are inclusive and that will be
+honored, even in non-standard use cases.
+
+##### Monolithic Upload
+
+A monolithic upload is simply a chunked upload with a single chunk and may be
+favored by clients that would like to avoid the complexity of chunking. To
+carry out a "monolithic" upload, one can simply put the entire content blob to
+the provided URL:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+The "digest" parameter must be included with the PUT request. Please see the
+_Completed Upload_ section for details on the parameters and expected
+responses.
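+
+A hedged Go sketch of such a monolithic upload follows; `uploadURL` is assumed
+to be the opaque URL returned by the starting POST, and the blob is assumed to
+be small enough to hold in memory:
+
+```
+package client
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+)
+
+// monolithicUpload PUTs the whole blob to the upload URL in one request.
+func monolithicUpload(uploadURL string, blob []byte) error {
+	sum := sha256.Sum256(blob)
+	digest := "sha256:" + hex.EncodeToString(sum[:])
+
+	// NOTE: a real client must merge the digest parameter with any query
+	// parameters already present in the opaque upload URL.
+	req, err := http.NewRequest("PUT", uploadURL+"?digest="+digest,
+		bytes.NewReader(blob))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/octet-stream")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusCreated {
+		return fmt.Errorf("upload failed: %s", resp.Status)
+	}
+	return nil
+}
+```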
+
+Additionally, the upload can be completed with a single `POST` request to
+the uploads endpoint, including the "size" and "digest" parameters:
+
+```
+POST /v2/<name>/blobs/uploads/?digest=<digest>
+Content-Length: <size of layer>
+Content-Type: application/octet-stream
+
+<Layer Binary Data>
+```
+
+On the registry service, this should allocate an upload, accept and verify
+the data and return the same response as the final chunk of an upload. If the
+POST request fails collecting the data in any way, the registry should attempt
+to return an error response to the client with the `Location` header providing
+a place to continue the upload.
+
+The single `POST` method is provided for convenience and most clients should
+implement `POST` + `PUT` to support reliable resume of uploads.
+
+##### Chunked Upload
+
+To carry out an upload of a chunk, the client can specify a range header and
+only include that part of the layer file:
+
+```
+PATCH /v2/<name>/blobs/uploads/<uuid>
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Layer Chunk Binary Data>
+```
+
+There is no enforcement on layer chunk splits other than that the server must
+receive them in order. The server may enforce a minimum chunk size. If the
+server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
+response will be returned and will include a `Range` header indicating the
+current status:
+
+```
+416 Requested Range Not Satisfiable
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: 0-<last valid range>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+If this response is received, the client should resume from the "last valid
+range" and upload the subsequent chunk. A 416 will be returned under the
+following conditions:
+
+- Invalid Content-Range header format
+- Out of order chunk: the range of the next chunk must start immediately after
+  the "last valid range" from the previous response.
+
+When a chunk is accepted as part of the upload, a `202 Accepted` response will
+be returned, including a `Range` header with the current upload status:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+##### Completed Upload
+
+For an upload to be considered complete, the client must submit a `PUT`
+request on the upload endpoint with a digest parameter. If it is not provided,
+the upload will not be considered complete. The format for the final chunk
+will be as follows:
+
+```
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+Content-Length: <size of chunk>
+Content-Range: <start of range>-<end of range>
+Content-Type: application/octet-stream
+
+<Last Layer Chunk Binary Data>
+```
+
+Optionally, if all chunks have already been uploaded, a `PUT` request with a
+`digest` parameter and zero-length body may be sent to complete and validate
+the upload. Multiple "digest" parameters may be provided with different
+digests. The server may verify none or all of them but _must_ notify the
+client if the content is rejected.
+
+When the last chunk is received and the layer has been validated, the client
+will receive a `201 Created` response:
+
+```
+201 Created
+Location: /v2/<name>/blobs/<digest>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The `Location` header will contain the registry URL to access the accepted
+layer file. The `Docker-Content-Digest` header returns the canonical digest of
+the uploaded blob which may differ from the provided digest. Most clients may
+ignore the value but if it is used, the client should verify the value against
+the uploaded blob data.
+
+###### Digest Parameter
+
+The "digest" parameter is designed as an opaque parameter to support
+verification of a successful transfer. For example, an HTTP URI parameter
+might be as follows:
+
+```
+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
+```
+
+Given this parameter, the registry will verify that the provided content does
+match this digest.
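+
+Tying the chunked flow together, here is a rough Go sketch of sending a single
+chunk (illustrative only; `uploadURL`, the starting offset and the chunk bytes
+are assumed to come from earlier requests, and the final PUT with the digest
+parameter is still required afterwards):
+
+```
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+// sendChunk PATCHes one chunk starting at the given offset and returns the
+// updated upload URL from the Location header.
+func sendChunk(uploadURL string, start int64, chunk []byte) (string, error) {
+	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(chunk))
+	if err != nil {
+		return "", err
+	}
+	end := start + int64(len(chunk)) - 1
+	req.Header.Set("Content-Type", "application/octet-stream")
+	// Non-standard use of Content-Range, per the specification above.
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusAccepted:
+		return resp.Header.Get("Location"), nil
+	case http.StatusRequestedRangeNotSatisfiable:
+		// Resume from the "last valid range" advertised by the server.
+		return "", fmt.Errorf("out of order chunk; current range: %s",
+			resp.Header.Get("Range"))
+	default:
+		return "", fmt.Errorf("chunk rejected: %s", resp.Status)
+	}
+}
+```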
+
+##### Canceling an Upload
+
+An upload can be cancelled by issuing a DELETE request to the upload endpoint.
+The format will be as follows:
+
+```
+DELETE /v2/<name>/blobs/uploads/<uuid>
+```
+
+After this request is issued, the upload uuid will no longer be valid and the
+registry server will dump all intermediate data. While uploads will time out
+if not completed, clients should issue this request if they encounter a fatal
+error but still have the ability to issue an http request.
+
+##### Cross Repository Blob Mount
+
+A blob may be mounted from another repository that the client has read access
+to, removing the need to upload a blob already known to the registry. To issue
+a blob mount instead of an upload, a POST request should be issued in the
+following format:
+
+```
+POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
+Content-Length: 0
+```
+
+If the blob is successfully mounted, the client will receive a `201 Created`
+response:
+
+```
+201 Created
+Location: /v2/<name>/blobs/<digest>
+Content-Length: 0
+Docker-Content-Digest: <digest>
+```
+
+The `Location` header will contain the registry URL to access the accepted
+layer file. The `Docker-Content-Digest` header returns the canonical digest of
+the uploaded blob which may differ from the provided digest. Most clients may
+ignore the value but if it is used, the client should verify the value against
+the uploaded blob data.
+
+If a mount fails due to invalid repository or digest arguments, the registry
+will fall back to the standard upload behavior and return a `202 Accepted` with
+the upload URL in the `Location` header:
+
+```
+202 Accepted
+Location: /v2/<name>/blobs/uploads/<uuid>
+Range: bytes=0-<offset>
+Content-Length: 0
+Docker-Upload-UUID: <uuid>
+```
+
+This behavior is consistent with older versions of the registry, which do not
+recognize the repository mount query parameters.
+
+Note: a client may issue a HEAD request to check existence of a blob in a source
+repository to distinguish between the registry not supporting blob mounts and
+the blob not existing in the expected repository.
+
+##### Errors
+
+If a 502, 503 or 504 error is received, the client should assume that the
+upload can proceed later, since the failure is due to a temporary condition,
+honoring the appropriate retry mechanism. Other 5xx errors should be treated
+as terminal.
+
+If there is a problem with the upload, a 4xx error will be returned indicating
+the problem. After receiving a 4xx response (except 416, as called out above),
+the upload will be considered failed and the client should take appropriate
+action.
+
+Note that the upload url will not be available forever. If the upload uuid is
+unknown to the registry, a `404 Not Found` response will be returned and the
+client must restart the upload process.
+
+### Deleting a Layer
+
+A layer may be deleted from the registry via its `name` and `digest`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/blobs/<digest>
+
+If the blob exists and has been successfully deleted, the following response
+will be issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the blob had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+If a layer which is referenced by a manifest in the registry is deleted, the
+affected images will no longer be resolvable.
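+
+For completeness, a small Go sketch of issuing such a delete (illustrative
+only; the repository name and digest are placeholders):
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical repository and digest.
+	url := "https://registry.example.com/v2/library/ubuntu/blobs/" +
+		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		panic(err)
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusAccepted:
+		fmt.Println("blob deleted")
+	case http.StatusNotFound:
+		fmt.Println("blob already deleted or never existed")
+	default:
+		fmt.Println("unexpected status:", resp.Status)
+	}
+}
+```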
+
+#### Pushing an Image Manifest
+
+Once all of the layers for an image are uploaded, the client can upload the
+image manifest. An image can be pushed using the following request format:
+
+    PUT /v2/<name>/manifests/<reference>
+
+    {
+       "name": <name>,
+       "tag": <tag>,
+       "fsLayers": [
+          {
+             "blobSum": <digest>
+          },
+          ...
+       ],
+       "history": <v1 images>,
+       "signature": <JWS>,
+       ...
+    }
+
+The `name` and `reference` fields of the request body must match those
+specified in the URL. The `reference` field may be a "tag" or a "digest".
+
+If there is a problem with pushing the manifest, a relevant 4xx response will
+be returned with a JSON error message. Please see the _PUT Manifest_ section
+for details on possible error codes that may be returned.
+
+If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
+returned. The `detail` field of the error response will have a `digest` field
+identifying the missing blob. An error is returned for each unknown blob. The
+response format is as follows:
+
+    {
+        "errors": [{
+                "code": "BLOB_UNKNOWN",
+                "message": "blob unknown to registry",
+                "detail": {
+                    "digest": <digest>
+                }
+            },
+            ...
+        ]
+    }
+
+### Listing Repositories
+
+Images are stored in collections known as _repositories_, each keyed by a
+`name`, as seen throughout the API specification. A registry instance may
+contain several repositories. The list of available repositories is made
+available through the _catalog_.
+
+The catalog for a given registry can be retrieved with the following request:
+
+```
+GET /v2/_catalog
+```
+
+The response will be in the following format:
+
+```
+200 OK
+Content-Type: application/json
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Note that the contents of the response are specific to the registry
+implementation. Some registries may opt to provide a full catalog output,
+limit it based on the user's access level or omit upstream results, if
+providing mirroring functionality. Consequently, the presence of a repository
+in the catalog listing only means that the registry *may* provide access to
+the repository at the time of the request. Conversely, a missing entry does
+*not* mean that the registry does not have the repository. More succinctly:
+presence in the catalog guarantees that the repository is there, but absence
+from the catalog does not guarantee that it is not there.
+
+For registries with a large number of repositories, this response may be quite
+large. If such a response is expected, one should use pagination. A registry
+may also limit the amount of responses returned even if pagination was not
+explicitly requested. In this case the `Link` header will be returned along
+with the results, and subsequent results can be obtained by following the link
+as if pagination had been initially requested.
+
+For details of the `Link` header, please see the _Pagination_ section.
+
+#### Pagination
+
+Paginated catalog results can be retrieved by adding an `n` parameter to the
+request URL, declaring that the response should be limited to `n` results. A
+paginated flow begins as follows:
+
+```
+GET /v2/_catalog?n=<integer>
+```
+
+The above specifies that a catalog response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+The above includes the _first_ `n` entries from the result set.
To get the
+_next_ `n` entries, one can create a URL where the argument `last` has the
+value from `repositories[len(repositories)-1]`. If there are indeed more
+results, the URL for the next block is encoded in an
+[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
+relation. The presence of the `Link` header communicates to the client that
+the entire result set has not been returned and another request must be
+issued. If the header is not present, the client can assume that all results
+have been received.
+
+> __NOTE:__ In the request template above, note that the brackets
+> are required. For example, if the url is
+> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
+> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
+> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
+
+Compliant client implementations should always use the `Link` header
+value when proceeding through results linearly. The client may construct URLs
+to skip forward in the catalog.
+
+To get the next result set, a client would issue the request as follows, using
+the URL encoded in the described `Link` header:
+
+```
+GET /v2/_catalog?n=<n from the request>&last=<last repository in response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set.
+
+The catalog result set is represented abstractly as a lexically sorted list,
+where the position in that list can be specified by the query term `last`. The
+entries in the response start _after_ the term specified by `last`, up to `n`
+entries.
+
+The behavior of `last` is quite simple when demonstrated with an example. Let
+us say the registry has the following repositories:
+
+```
+a
+b
+c
+d
+```
+
+If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
+The `Link` header returned on the response will have `n` set to 2 and `last`
+set to _b_:
+
+```
+Link: <<url>?n=2&last=b>; rel="next"
+```
+
+The client can then issue the request with the above value from the `Link`
+header, receiving the values _c_ and _d_. Note that `n` may change on the
+second-to-last response, or be omitted entirely, if the server so chooses.
+
+### Listing Image Tags
+
+It may be necessary to list all of the tags under a given repository. The tags
+for an image repository can be retrieved with the following request:
+
+    GET /v2/<name>/tags/list
+
+The response will be in the following format:
+
+    200 OK
+    Content-Type: application/json
+
+    {
+        "name": <name>,
+        "tags": [
+            <tag>,
+            ...
+        ]
+    }
+
+For repositories with a large number of tags, this response may be quite
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated tag results can be retrieved by adding the appropriate parameters to
+the request URL described above. The behavior of tag pagination is identical
+to that specified for catalog pagination. We cover a simple flow to highlight
+any differences.
+
+A paginated flow may begin as follows:
+
+```
+GET /v2/<name>/tags/list?n=<integer>
+```
+
+The above specifies that a tags response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"
+
+{
+  "name": <name>,
+  "tags": [
+    <tag>,
+    ...
+  ]
+}
+```
+
+To get the next result set, a client would issue the request as follows, using
+the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
+header:
+
+```
+GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set in the response. The behavior of the `last` parameter, the provided
+response result, lexical ordering and encoding of the `Link` header are
+identical to that of catalog pagination.
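+
+As an illustration, here is a Go sketch that walks all tag pages by following
+the `Link` header (simplified; a production client should parse the RFC5988
+header and resolve relative URLs against the request URL rather than the naive
+handling shown here; the registry and repository are placeholders):
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+type tagList struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+func main() {
+	// Hypothetical first page request.
+	url := "https://registry.example.com/v2/library/ubuntu/tags/list?n=100"
+	for url != "" {
+		resp, err := http.Get(url)
+		if err != nil {
+			panic(err)
+		}
+		var page tagList
+		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
+			panic(err)
+		}
+		resp.Body.Close()
+		fmt.Println(page.Tags)
+
+		// Naive Link parsing: `<url>; rel="next"` -> url.
+		url = ""
+		if link := resp.Header.Get("Link"); link != "" {
+			if i := strings.Index(link, ">"); i > 0 {
+				url = strings.TrimPrefix(link[:i], "<")
+			}
+		}
+	}
+}
+```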
+
+### Deleting an Image
+
+An image may be deleted from the registry via its `name` and `reference`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/manifests/<reference>
+
+For deletes, `reference` *must* be a digest or the delete will fail. If the
+image exists and has been successfully deleted, the following response will be
+issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the image had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+## Detail
+
+> **Note**: This section is still under construction. For the purposes of
+> implementation, if any details below differ from the described request flows
+> above, the section below should be corrected. When they match, this note
+> should be removed.
+
+The behavior of the endpoints is covered in detail in this section, organized
+by route and entity. All aspects of the request and responses are covered,
+including headers, parameters and body formats. Examples of requests and their
+corresponding responses, with success and failure, are enumerated.
+
+> **Note**: The sections on endpoint detail are arranged with an example
+> request, a description of the request, followed by information about that
+> request.
+
+A list of methods and URIs is covered in the table below:
+
+|Method|Path|Entity|Description|
+|------|----|------|-----------|
+{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
+{{end}}{{end}}
+
+The detail for each endpoint is covered in the following sections.
+ +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} +{{end}} + +{{range $route := .RouteDescriptors}} +### {{.Entity}} + +{{.Description}} + +{{range $method := $route.Methods}} + +#### {{.Method}} {{$route.Entity}} + +{{.Description}} + +{{if .Requests}}{{range .Requests}}{{if .Name}} +##### {{.Name}}{{end}} + +``` +{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} + +{{if or .Headers .PathParameters .QueryParameters}} +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| +{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| +{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| +{{end}}{{end}} + +{{if .Successes}} +{{range .Successes}} +###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Fields}}The following fields may be returned in the response body: + +|Name|Description| +|----|-----------| +{{range .Fields}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{if .Headers}} +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{end}}{{end}} + +{{if .Failures}} +{{range .Failures}} +###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Headers}} +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}} + +{{if .ErrorCodes}} +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | +{{end}} + +{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} + +{{end}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md new file mode 100644 index 00000000..b0d31256 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md @@ -0,0 +1,14 @@ + + +# Docker Registry v2 authentication + +See the [Token Authentication Specification](token.md) and +[Token Authentication Implementation](jwt.md) for more information. 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md new file mode 100644 index 00000000..f627b17a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md @@ -0,0 +1,324 @@ + + +# Docker Registry v2 Bearer token specification + +This specification covers the `docker/distribution` implementation of the +v2 Registry's authentication schema. Specifically, it describes the JSON +Web Token schema that `docker/distribution` has adopted to implement the +client-opaque Bearer token issued by an authentication service and +understood by the registry. + +This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) + +## Getting a Bearer Token + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. As of Docker 1.8, the +registry client in the Docker Engine only supports Basic Authentication to +these token servers. If an attempt to authenticate to the token server fails, +the token server should return a `401 Unauthorized` response indicating that +the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. + +The server will now construct a JSON Web Token to sign and return. A JSON Web +Token has 3 main parts: + +1. Headers + + The header of a JSON Web Token is a standard JOSE header. The "typ" field + will be "JWT" and it will also contain the "alg" which identifies the + signing algorithm used to produce the signature. 
It will also usually have
+   a "kid" field, the ID of the key which was used to sign the token.
+
+   Here is an example JOSE Header for a JSON Web Token (formatted with
+   whitespace for readability):
+
+   ```
+   {
+       "typ": "JWT",
+       "alg": "ES256",
+       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
+   }
+   ```
+
+   It specifies that this object is a JSON Web Token, signed using the key
+   with the given ID and the Elliptic Curve signature algorithm with a SHA256
+   hash.
+
+2. Claim Set
+
+   The Claim Set is a JSON struct containing these standard registered claim
+   name fields:
+
+   - `iss` (Issuer): The issuer of the token, typically the fqdn of the
+     authorization server.
+
+   - `sub` (Subject): The subject of the token; the name or id of the client
+     which requested it. This should be empty (`""`) if the client did not
+     authenticate.
+
+   - `aud` (Audience): The intended audience of the token; the name or id of
+     the service which will verify the token to authorize the client/subject.
+
+   - `exp` (Expiration): The token should only be considered valid up to this
+     specified date and time.
+
+   - `nbf` (Not Before): The token should not be considered valid before this
+     specified date and time.
+
+   - `iat` (Issued At): Specifies the date and time at which the
+     Authorization server generated this token.
+
+   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
+     intended audience to prevent replays of the token.
+
+ + The Claim Set will also contain a private claim name unique to this + authorization server specification: + +
+
+   - `access`: An array of access entry objects with the following fields:
+
+     - `type`: The type of resource hosted by the service.
+
+     - `name`: The name of the resource of the given type hosted by the
+       service.
+
+     - `actions`: An array of strings which give the actions authorized on
+       this resource.
+
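+
+   Taken together, the claim set might be modeled in Go roughly as in the
+   following sketch (the type and field names are illustrative; only the
+   JSON keys come from this specification):
+
+   ```go
+   // ClaimSet holds the registered claims plus the private "access" claim
+   // described above. exp, nbf, and iat are Unix timestamps.
+   type ClaimSet struct {
+       Issuer     string        `json:"iss"`
+       Subject    string        `json:"sub"`
+       Audience   string        `json:"aud"`
+       Expiration int64         `json:"exp"`
+       NotBefore  int64         `json:"nbf"`
+       IssuedAt   int64         `json:"iat"`
+       JWTID      string        `json:"jti"`
+       Access     []AccessEntry `json:"access"`
+   }
+
+   // AccessEntry is a single entry of the "access" claim.
+   type AccessEntry struct {
+       Type    string   `json:"type"`
+       Name    string   `json:"name"`
+       Actions []string `json:"actions"`
+   }
+   ```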
+
+   Here is an example of such a JWT Claim Set (formatted with whitespace for
+   readability):
+
+   ```
+   {
+       "iss": "auth.docker.com",
+       "sub": "jlhawn",
+       "aud": "registry.docker.com",
+       "exp": 1415387315,
+       "nbf": 1415387015,
+       "iat": 1415387015,
+       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
+       "access": [
+           {
+               "type": "repository",
+               "name": "samalba/my-app",
+               "actions": [
+                   "pull",
+                   "push"
+               ]
+           }
+       ]
+   }
+   ```
+
+3. Signature
+
+   The authorization server will produce a JOSE header and Claim Set with no
+   extraneous whitespace, i.e., the JOSE Header from above would be
+
+   ```
+   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
+   ```
+
+   and the Claim Set from above would be
+
+   ```
+   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]}
+   ```
+
+   The UTF-8 representations of this JOSE Header and Claim Set are then
+   url-safe base64 encoded (sans trailing '=' buffer), producing:
+
+   ```
+   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
+   ```
+
+   for the JOSE Header and
+
+   ```
+   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
+   ```
+
+   for the Claim Set. These two are concatenated using a '.' character,
+   yielding the string:
+
+   ```
+   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
+   ```
+
+   This is then used as the payload to the `ES256` signature algorithm
+   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
+   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).
+
+   This example signature will use the following ECDSA key for the server:
+
+   ```
+   {
+       "kty": "EC",
+       "crv": "P-256",
+       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
+       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
+       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
+       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
+   }
+   ```
+
+   A resulting signature of the above payload using this key is:
+
+   ```
+   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
+   ```
+
+   Concatenating all of these together with a `.` character gives the
+   resulting JWT:
+
+   ```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + +This can now be placed in an HTTP response and returned to the client to use to +authenticate to the audience service: + + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} +``` + +## Using the signed token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) + +## Verifying the token + +The registry must now verify the token presented by the user by inspecting the +claim set within. The registry will: + +- Ensure that the issuer (`iss` claim) is an authority it trusts. +- Ensure that the registry identifies as the audience (`aud` claim). +- Check that the current time is between the `nbf` and `exp` claim times. +- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has + not been seen before. + - To enforce this, the registry may keep a record of `jti`s it has seen for + up to the `exp` time of the token to prevent token replays. +- Check the `access` claim value and use the identified resources and the list + of actions authorized to determine whether the token grants the required + level of access for the operation the client is attempting to perform. +- Verify that the signature of the token is valid. + +If any of these requirements are not met, the registry will return a +`403 Forbidden` response to indicate that the token is invalid. + +**Note**: it is only at this point in the workflow that an authorization error +may occur. The token server should *not* return errors when the user does not +have the requested authorization. 
Instead, the returned token should indicate
+whatever subset of the requested scope the client does have (the intersection
+of requested and granted access). If the token does not supply proper
+authorization then the registry will return the appropriate error.
+
+At no point in this process should the registry need to call back to the
+authorization server. The registry only needs to be supplied with the trusted
+public keys to verify the token signatures.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
new file mode 100644
index 00000000..61e893c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
@@ -0,0 +1,220 @@
+
+
+# Docker Registry v2 authentication via central service
+
+This document outlines the v2 Docker registry authentication scheme:
+
+![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360)
+
+1. Attempt to begin a push/pull operation with the registry.
+2. If the registry requires authorization it will return a `401 Unauthorized`
+   HTTP response with information on how to authenticate.
+3. The registry client makes a request to the authorization service for a
+   Bearer token.
+4. The authorization service returns an opaque Bearer token representing the
+   client's authorized access.
+5. The client retries the original request with the Bearer token embedded in
+   the request's Authorization header.
+6. The Registry authorizes the client by validating the Bearer token and the
+   claim set embedded within it and begins the push/pull session as usual.
+
+## Requirements
+
+- Registry clients which can understand and respond to token auth challenges
+  returned by the resource server.
+- An authorization server capable of managing access controls to the
+  resources hosted by any given service (such as repositories in a Docker
+  Registry).
+- A Docker Registry capable of trusting the authorization server to sign
+  tokens which clients can use for authorization, and capable of verifying
+  these tokens for single use or for use during a sufficiently short period
+  of time.
+
+## Authorization Server Endpoint Descriptions
+
+The described server is meant to serve as a standalone access control manager
+for resources hosted by other services which wish to authenticate and manage
+authorizations using a separate access control manager.
+
+A service like this is used by the official Docker Registry to authenticate
+clients and verify their authorization to Docker image repositories.
+
+As of Docker 1.6, the registry client within the Docker Engine has been updated
+to handle such an authorization workflow.
+
+## How to authenticate
+
+Registry V1 clients first contact the index to initiate a push or pull. Under
+the Registry V2 workflow, clients should contact the registry first. If the
+registry server requires authentication it will return a `401 Unauthorized`
+response with a `WWW-Authenticate` header detailing how to authenticate to this
+registry.
+
+For example, say I (username `jlhawn`) am attempting to push an image to the
+repository `samalba/my-app`. For the registry to authorize this, I will need
+`push` access to the `samalba/my-app` repository.
The registry will first +return this response: + +``` +HTTP/1.1 401 Unauthorized +Content-Type: application/json; charset=utf-8 +Docker-Distribution-Api-Version: registry/2.0 +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +Date: Thu, 10 Sep 2015 19:32:31 GMT +Content-Length: 235 +Strict-Transport-Security: max-age=31536000 + +{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]} +``` + +Note the HTTP Response Header indicating the auth challenge: + +``` +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +``` + +This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3) + +This challenge indicates that the registry requires a token issued by the +specified token server and that the request the client is attempting will +need to include sufficient access entries in its claim set. To respond to this +challenge, the client will need to make a `GET` request to the URL +`https://auth.docker.io/token` using the `service` and `scope` values from the +`WWW-Authenticate` header. + +## Requesting a Token + +#### Query Parameters + +
+
+- `service`: The name of the service which hosts the resource.
+
+- `scope`: The resource in question, formatted as one of the space-delimited
+  entries from the `scope` parameters from the `WWW-Authenticate` header
+  shown above. This query parameter should be specified multiple times if
+  there is more than one `scope` entry from the `WWW-Authenticate` header.
+  The above example would be specified as:
+  `scope=repository:samalba/my-app:push`.
+
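+
+To illustrate, a client might assemble the token request URL from these
+parameters as in the following Go sketch (the endpoint comes from the
+`realm` value of the challenge):
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/url"
+)
+
+func main() {
+    // Values taken from the WWW-Authenticate challenge shown earlier.
+    params := url.Values{}
+    params.Set("service", "registry.docker.io")
+    // Add one "scope" entry per space-delimited scope in the challenge.
+    params.Add("scope", "repository:samalba/my-app:pull,push")
+
+    fmt.Println("https://auth.docker.io/token?" + params.Encode())
+}
+```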
+ + +#### Token Response Fields + +
+
+- `token`: An opaque Bearer token that clients should supply to subsequent
+  requests in the `Authorization` header.
+
+- `access_token`: For compatibility with OAuth 2.0, the token is also
+  accepted under the name `access_token`. At least one of these fields must
+  be specified, but both may also appear (for compatibility with older
+  clients). When both are specified, they should be equivalent; if they
+  differ, the client's choice is undefined.
+
+- `expires_in`: (Optional) The duration in seconds since the token was
+  issued that it will remain valid. When omitted, this defaults to 60
+  seconds. For compatibility with older clients, a token should never be
+  returned with less than 60 seconds to live.
+
+- `issued_at`: (Optional) The RFC3339-serialized UTC standard time at which
+  a given token was issued. If `issued_at` is omitted, the expiration is
+  measured from when the token exchange completed.
+
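+
+A response carrying these fields might be decoded as in this sketch (the Go
+field types are illustrative assumptions; `httpResp` is the `*http.Response`
+from the token request, and `encoding/json` and `time` are imported):
+
+```go
+var tokenResponse struct {
+    Token       string    `json:"token"`
+    AccessToken string    `json:"access_token"`
+    ExpiresIn   int       `json:"expires_in"` // seconds the token remains valid
+    IssuedAt    time.Time `json:"issued_at"`  // RFC3339, parsed by encoding/json
+}
+if err := json.NewDecoder(httpResp.Body).Decode(&tokenResponse); err != nil {
+    // ... handle error
+}
+```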
+ +#### Example + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. As of Docker 1.8, the +registry client in the Docker Engine only supports Basic Authentication to +these token servers. If an attempt to authenticate to the token server fails, +the token server should return a `401 Unauthorized` response indicating that +the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. 
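+
+The intersection itself is plain set arithmetic; a sketch in Go:
+
+```go
+// intersectActions returns the actions both requested and granted.
+func intersectActions(requested, granted []string) []string {
+    grantedSet := make(map[string]struct{}, len(granted))
+    for _, action := range granted {
+        grantedSet[action] = struct{}{}
+    }
+    var intersected []string
+    for _, action := range requested {
+        if _, ok := grantedSet[action]; ok {
+            intersected = append(intersected, action)
+        }
+    }
+    return intersected
+}
+```
+
+With the example above, `intersectActions([]string{"pull", "push"},
+[]string{"pull"})` yields `[pull]`; an empty intersection yields an empty
+set rather than an error.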
+ +The server then constructs an implementation-specific token with this +intersected set of access, and returns it to the Docker client to use to +authenticate to the audience service (within the indicated window of time): + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": "3600","issued_at": "2009-11-10T23:00:00Z"} +``` + + +## Using the Bearer token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md new file mode 100644 index 00000000..ec937b64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md @@ -0,0 +1,32 @@ + + +# Distribution API Implementations + +This is a list of known implementations of the Distribution API spec. + +## [Docker Distribution Registry](https://github.com/docker/distribution) + +Docker distribution is the reference implementation of the distribution API +specification. It aims to fully implement the entire specification. + +### Releases +#### 2.0.1 (_in development_) +Implements API 2.0.1 + +_Known Issues_ + - No resumable push support + - Content ranges ignored + - Blob upload status will always return a starting range of 0 + +#### 2.0.0 +Implements API 2.0.0 + +_Known Issues_ + - No resumable push support + - No PATCH implementation for blob upload + - Content ranges ignored + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md new file mode 100644 index 00000000..a8916dcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md @@ -0,0 +1,94 @@ + + + + +# Docker Distribution JSON Canonicalization + +To provide consistent content hashing of JSON objects throughout Docker +Distribution APIs, the specification defines a canonical JSON format. Adopting +such a canonicalization also aids in caching JSON responses. 
+
+Note that protocols should not be designed to depend on identical JSON being
+generated across different versions or clients. The canonicalization rules are
+merely useful for caching and consistency.
+
+## Rules
+
+Compliant JSON should conform to the following rules:
+
+1. All generated JSON should comply with [RFC
+   7159](http://www.ietf.org/rfc/rfc7159.txt).
+2. Resulting "JSON text" shall always be encoded in UTF-8.
+3. Unless a canonical key order is defined for a particular schema, object
+   keys shall always appear in lexically sorted order.
+4. All whitespace between tokens should be removed.
+5. No "trailing commas" are allowed in object or array definitions.
+6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e".
+   Ampersand "&" is escaped to "\u0026".
+
+## Examples
+
+The following is a simple example of a canonicalized JSON string:
+
+```json
+{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
+```
+
+## Reference
+
+### Other Canonicalizations
+
+The OLPC project specifies [Canonical
+JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
+[TUF](http://theupdateframework.com/), which may be used with other
+distribution-related protocols, this alternative format has been proposed in
+case the original source changes. Specifications complying with either this
+specification or an alternative should explicitly call out the
+canonicalization format. Except for key ordering, this specification is mostly
+compatible.
+
+### Go
+
+In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
+will emit canonical JSON by default. Simply using `json.Marshal` will suffice
+in most cases:
+
+```go
+incoming := map[string]interface{}{
+    "asdf": 1,
+    "qwer": []interface{}{},
+    "zxcv": []interface{}{
+        map[string]interface{}{},
+        true,
+        int(1e9),
+        "tyui",
+    },
+}
+
+canonical, err := json.Marshal(incoming)
+if err != nil {
+    // ... handle error
+}
+```
+
+To apply canonical JSON format spacing to an existing serialized JSON buffer,
+one can use
+[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
+with the following arguments:
+
+```go
+incoming := getBytes()
+var canonical bytes.Buffer
+if err := json.Indent(&canonical, incoming, "", ""); err != nil {
+    // ... handle error
+}
+```
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
new file mode 100644
index 00000000..418bca5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
@@ -0,0 +1,163 @@
+
+
+# Image Manifest Version 2, Schema 1
+
+This document outlines the format of the V2 image manifest. The image
+manifest described herein was introduced in the Docker daemon in the [v1.3.0
+release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
+It is a provisional manifest to provide compatibility with the [V1 Image
+format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the
+requirements are defined for the [V2 Schema 2
+image](https://github.com/docker/distribution/pull/62).
+
+
+Image manifests describe the various constituents of a docker image.
Image +manifests can be serialized to JSON format with the following media types: + +Manifest Type | Media Type +------------- | ------------- +manifest | "application/vnd.docker.distribution.manifest.v1+json" +signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" + +*Note that "application/json" will also be accepted for schema 1.* + +References: + + - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) + - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) + +## *Manifest* Field Descriptions + +Manifest provides the base accessible fields for working with V2 image format + in the registry. + +- **`name`** *string* + + name is the name of the image's repository + +- **`tag`** *string* + + tag is the tag of the image + +- **`architecture`** *string* + + architecture is the host architecture on which this image is intended to + run. This is for information purposes and not currently used by the engine + +- **`fsLayers`** *array* + + fsLayers is a list of filesystem layer blob sums contained in this image. + + An fsLayer is a struct consisting of the following fields + - **`blobSum`** *digest.Digest* + + blobSum is the digest of the referenced filesystem image layer. A + digest must be a sha256 hash. + + +- **`history`** *array* + + history is a list of unstructured historical data for v1 compatibility. It + contains ID of the image layer and ID of the layer's parent layers. + + history is a struct consisting of the following fields + - **`v1Compatibility`** string + + V1Compatibility is the raw V1 compatibility information. This will + contain the JSON object describing the V1 of this image. + +- **`schemaVersion`** *int* + + SchemaVersion is the image manifest schema that this image follows. + +>**Note**:the length of `history` must be equal to the length of `fsLayers` and +>entries in each are correlated by index. + +## Signed Manifests + +Signed manifests provides an envelope for a signed image manifest. A signed +manifest consists of an image manifest along with an additional field +containing the signature of the manifest. + +The docker client can verify signed manifests and displays a message to the user. + +### Signing Manifests + +Image manifests can be signed in two different ways: with a *libtrust* private + key or an x509 certificate chain. When signing with an x509 certificate chain, + the public key of the first element in the chain must be the public key + corresponding with the sign key. + +### Signed Manifest Field Description + +Signed manifests include an image manifest and a list of signatures generated +by *libtrust*. 
A signature consists of the following fields: + + +- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* + + A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) + +- **`signature`** *string* + + A signature for the image manifest, signed by a *libtrust* private key + +- **`protected`** *string* + + The signed protected header + +## Example Manifest + +*Example showing the official 'hello-world' image manifest.* + +``` +{ + "name": "hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", + "kty": "EC", + "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", + "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" + }, + "alg": "ES256" + }, + "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" + } + ] +} + +``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md new file mode 100644 index 00000000..7449e580 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md @@ -0,0 +1,273 @@ + + +# Image Manifest Version 2, Schema 2 + +This document outlines the format of of the V2 image manifest, schema version 2. +The original (and provisional) image manifest for V2 (schema 1), was introduced +in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) +and is specified in the [schema 1 manifest definition](./manifest-v2-1.md) + +This second schema version has two primary goals. The first is to allow +multi-architecture images, through a "fat manifest" which references image +manifests for platform-specific versions of an image. The second is to +move the Docker engine towards content-addressable images, by supporting +an image model where the image's configuration can be hashed to generate +an ID for the image. + +# Media Types + +The following media types are used by the manifest formats described here, and +the resources they reference: + +- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format) +- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2) +- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest" +- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar +- `application/vnd.docker.container.image.v1+json`: Container config JSON + +## Manifest List + +The manifest list is the "fat manifest" which points to specific image manifests +for one or more platforms. Its use is optional, and relatively few images will +use one of these manifests. 
A client will distinguish a manifest list from an
+image manifest based on the Content-Type returned in the HTTP response.
+
+## *Manifest List* Field Descriptions
+
+- **`schemaVersion`** *int*
+
+  This field specifies the image manifest schema version as an integer. This
+  schema uses the version `2`.
+
+- **`mediaType`** *string*
+
+  The MIME type of the manifest list. This should be set to
+  `application/vnd.docker.distribution.manifest.list.v2+json`.
+
+- **`manifests`** *array*
+
+  The manifests field contains a list of manifests for specific platforms.
+
+  Fields of an object in the manifests list are:
+
+  - **`mediaType`** *string*
+
+    The MIME type of the referenced object. This will generally be
+    `application/vnd.docker.image.manifest.v2+json`, but it could also
+    be `application/vnd.docker.image.manifest.v1+json` if the manifest
+    list references a legacy schema-1 manifest.
+
+  - **`size`** *int*
+
+    The size in bytes of the object. This field exists so that a client
+    will have an expected size for the content before validating. If the
+    length of the retrieved content does not match the specified length,
+    the content should not be trusted.
+
+  - **`digest`** *string*
+
+    The digest of the content, as defined by the
+    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).
+
+  - **`platform`** *object*
+
+    The platform object describes the platform which the image in the
+    manifest runs on.
+
+    - **`architecture`** *string*
+
+      The architecture field specifies the CPU architecture, for example
+      `amd64` or `ppc64`.
+
+    - **`os`** *string*
+
+      The os field specifies the operating system, for example
+      `linux` or `windows`.
+
+    - **`variant`** *string*
+
+      The optional variant field specifies a variant of the CPU, for
+      example `ppc64le` to specify a little-endian version of a PowerPC
+      CPU.
+
+    - **`features`** *array*
+
+      The optional features field specifies an array of strings, each
+      listing a required CPU feature (for example `sse4` or `aes`).
+
+## Example Manifest List
+
+*Example showing a simple manifest list pointing to image manifests for two platforms:*
+```json
+{
+  "schemaVersion": 2,
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "manifests": [
+    {
+      "mediaType": "application/vnd.docker.image.manifest.v2+json",
+      "size": 7143,
+      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+      "platform": {
+        "architecture": "ppc64",
+        "os": "linux",
+        "variant": "ppc64le"
+      }
+    },
+    {
+      "mediaType": "application/vnd.docker.image.manifest.v2+json",
+      "size": 7682,
+      "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+      "platform": {
+        "architecture": "x86-64",
+        "os": "linux",
+        "features": [
+          "sse4"
+        ]
+      }
+    }
+  ]
+}
+```
+
+# Image Manifest
+
+The image manifest provides a configuration and a set of layers for a container
+image. It's the direct replacement for the schema-1 manifest.
+
+## *Image Manifest* Field Descriptions
+
+- **`schemaVersion`** *int*
+
+  This field specifies the image manifest schema version as an integer. This
+  schema uses version `2`.
+
+- **`mediaType`** *string*
+
+  The MIME type of the manifest. This should be set to
+  `application/vnd.docker.distribution.manifest.v2+json`.
+
+- **`config`** *object*
+
+  The config field references a configuration object for a container, by
+  digest. This configuration item is a JSON blob that the runtime uses
+  to set up the container.
This new schema uses a tweaked version
+  of this configuration to allow image content-addressability on the
+  daemon side.
+
+  Fields of a config object are:
+
+  - **`mediaType`** *string*
+
+    The MIME type of the referenced object. This should generally be
+    `application/vnd.docker.container.image.v1+json`.
+
+  - **`size`** *int*
+
+    The size in bytes of the object. This field exists so that a client
+    will have an expected size for the content before validating. If the
+    length of the retrieved content does not match the specified length,
+    the content should not be trusted.
+
+  - **`digest`** *string*
+
+    The digest of the content, as defined by the
+    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).
+
+- **`layers`** *array*
+
+  The layer list is ordered starting from the base image (opposite order of schema1).
+
+  Fields of an item in the layers list are:
+
+  - **`mediaType`** *string*
+
+    The MIME type of the referenced object. This should
+    generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`.
+
+  - **`size`** *int*
+
+    The size in bytes of the object. This field exists so that a client
+    will have an expected size for the content before validating. If the
+    length of the retrieved content does not match the specified length,
+    the content should not be trusted.
+
+  - **`digest`** *string*
+
+    The digest of the content, as defined by the
+    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).
+
+## Example Image Manifest
+
+*Example showing an image manifest:*
+```json
+{
+  "schemaVersion": 2,
+  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+  "config": {
+    "mediaType": "application/vnd.docker.container.image.v1+json",
+    "size": 7023,
+    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+  },
+  "layers": [
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 32654,
+      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+    },
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 16724,
+      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+    },
+    {
+      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "size": 73109,
+      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+    }
+  ]
+}
+```
+
+# Backward compatibility
+
+The registry will continue to accept uploads of manifests in both the old and
+new formats.
+
+When pushing images, clients which support the new manifest format should first
+construct a manifest in the new format. If uploading this manifest fails,
+presumably because the registry only supports the old format, the client may
+fall back to uploading a manifest in the old format.
+
+When pulling images, clients indicate support for this new version of the
+manifest format by sending the
+`application/vnd.docker.distribution.manifest.v2+json` and
+`application/vnd.docker.distribution.manifest.list.v2+json` media types in an
+`Accept` header when making a request to the `manifests` endpoint. Updated
+clients should check the `Content-Type` header to see whether the manifest
+returned from the endpoint is in the old format, or is an image manifest or
+manifest list in the new format.
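+
+As a sketch, a Go client opting in to the new formats might prepare its
+manifest request as follows (the registry host and repository here are
+placeholders):
+
+```go
+req, err := http.NewRequest("GET",
+    "https://registry.example.com/v2/library/hello-world/manifests/latest", nil)
+if err != nil {
+    // ... handle error
+}
+// Advertise support for both new media types; the Content-Type of the
+// response indicates which format the registry actually returned.
+req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
+req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")
+resp, err := http.DefaultClient.Do(req)
+// inspect resp.Header.Get("Content-Type") to see which format came back
+```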
+ +If the manifest being requested uses the new format, and the appropriate media +type is not present in an `Accept` header, the registry will assume that the +client cannot handle the manifest as-is, and rewrite it on the fly into the old +format. If the object that would otherwise be returned is a manifest list, the +registry will look up the appropriate manifest for the x86-64 platform and +linux OS, rewrite that manifest into the old format if necessary, and return +the result to the client. If no suitable manifest is found in the manifest +list, the registry will return a 404 error. + +One of the challenges in rewriting manifests to the old format is that the old +format involves an image configuration for each layer in the manifest, but the +new format only provides one image configuration. To work around this, the +registry will create synthetic image configurations for all layers except the +top layer. These image configurations will not result in runnable images on +their own, but only serve to fill in the parent chain in a compatible way. +The IDs in these synthetic configurations will be derived from hashes of their +respective blobs. The registry will create these configurations and their IDs +using the same scheme as Docker 1.10 when it creates a legacy manifest to push +to a registry which doesn't support the new format. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md new file mode 100644 index 00000000..f994f38a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md @@ -0,0 +1,24 @@ + + + +# Microsoft Azure storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. + +## Parameters + +The following parameters must be used to authenticate and configure the storage driver (case-sensitive): + +* `accountname`: Name of the Azure Storage Account. +* `accountkey`: Primary or Secondary Key for the Storage Account. +* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. +* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. + +[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/ +[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md new file mode 100644 index 00000000..2dbad8cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md @@ -0,0 +1,16 @@ + + + +# Filesystem storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. + +## Parameters + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/var/lib/registry`. 
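+
+As a sketch, selecting this driver in a registry configuration file looks
+like the following (the path shown is the default):
+
+```
+storage:
+  filesystem:
+    rootdirectory: /var/lib/registry
+```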
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md new file mode 100644 index 00000000..bb6f2b01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md @@ -0,0 +1,22 @@ + + + +# Google Cloud Storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage. + +## Parameters + +`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization). + +`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts). + +**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md new file mode 100644 index 00000000..f43e1510 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md @@ -0,0 +1,18 @@ + + + +# In-memory storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage. + +**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing. + +## Parameters + +None diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md new file mode 100644 index 00000000..f6f2de97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md @@ -0,0 +1,33 @@ + + +# Aliyun OSS storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage. + +## Parameters + +* `accesskeyid`: Your access key ID. + +* `accesskeysecret`: Your access key secret. + +* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at + +* `endpoint`: (optional) By default, the endpoint shoulb be `..aliyuncs.com` or `.-internal.aliyuncs.com` (when internal=true). You can change the default endpoint via changing this value. + +* `internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at + +* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). + +* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). + +* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true if not specified. + +* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. 
You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. + +* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md new file mode 100644 index 00000000..4b630e19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md @@ -0,0 +1,40 @@ + + + +# Ceph RADOS storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses +[Ceph RADOS Object Storage][rados] for storage backend. + +## Parameters + +The following parameters must be used to configure the storage driver +(case-sensitive): + +* `poolname`: Name of the Ceph pool +* `username` *optional*: The user to connect as (i.e. admin, not client.admin) +* `chunksize` *optional*: Size of the written RADOS objects. Default value is +4MB (4194304). + +This drivers loads the [Ceph client configuration][rados-config] from the +following regular paths (the first found is used): + +* `$CEPH_CONF` (environment variable) +* `/etc/ceph/ceph.conf` +* `~/.ceph/config` +* `ceph.conf` (in the current working directory) + +## Developing + +To include this driver when building Docker Distribution, use the build tag +`include_rados`. Please see the [building documentation][building] for details. + +[rados]: http://ceph.com/docs/master/rados/ +[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/ +[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md new file mode 100644 index 00000000..0f4326fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md @@ -0,0 +1,83 @@ + + + +# S3 storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage. + +## Parameters + +`accesskey`: Your aws access key. + +`secretkey`: Your aws secret key. + +**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials. + +`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html + +`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization. + +`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). + +`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. + +`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. 
This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false) + +`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes. + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). + +# CloudFront as Middleware with S3 backend + +## Use Case + +Adding CloudFront as a middleware for your S3 backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). + +## Configuring CloudFront for Distribution + +If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with Cloudfront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html). + +Defaults can be kept in most areas except: + +### Origin: + +The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank. + +### Behaviors: + + - Viewer Protocol Policy: HTTPS Only + - Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE + - Cached HTTP Methods: OPTIONS (checked) + - Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes + - Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts) + +## Registry configuration + +Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3. + +The following example shows what you will need at minimum: +``` +... +storage: + s3: + region: us-east-1 + bucket: docker.myregistry.com +middleware: + storage: + - name: cloudfront + options: + baseurl: https://abcdefghijklmn.cloudfront.net/ + privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem + keypairid: ABCEDFGHIJKLMNOPQRST +... +``` + +## CloudFront Key-Pair + +A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs). \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md new file mode 100644 index 00000000..ca17e9ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md @@ -0,0 +1,200 @@ + + + +# OpenStack Swift storage driver + +An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. 
+
+## Parameters
+
+* `authurl`: URL for obtaining an auth token.
+
+* `username`: Your OpenStack user name.
+
+* `password`: Your OpenStack password.
+
+* `container`: The name of your Swift container where you wish to store the
+  registry's data. The driver creates the named container during its
+  initialization.
+
+* `tenant`: (optional) Your OpenStack tenant name. You can either use
+  `tenant` or `tenantid`.
+
+* `tenantid`: (optional) Your OpenStack tenant id. You can either use
+  `tenant` or `tenantid`.
+
+* `domain`: (optional) Your OpenStack domain name for Identity v3 API. You
+  can either use `domain` or `domainid`.
+
+* `domainid`: (optional) Your OpenStack domain id for Identity v3 API. You
+  can either use `domain` or `domainid`.
+
+* `trustid`: (optional) Your OpenStack trust id for Identity v3 API.
+
+* `insecureskipverify`: (optional) Set `insecureskipverify` to true to skip
+  TLS verification for your OpenStack provider. The driver uses false by
+  default.
+
+* `region`: (optional) The OpenStack region name in which you would like to
+  store objects (for example `fr`).
+
+* `chunksize`: (optional) The segment size for Dynamic Large Objects uploads
+  (performed by WriteStream) to Swift. The default is 5 MB. You might
+  experience better performance for larger chunk sizes depending on the
+  speed of your connection to Swift.
+
+* `prefix`: (optional) A prefix that will be applied to all Swift keys to
+  allow you to segment data in your container if necessary. Defaults to the
+  empty string which is the container's root.
+
+* `secretkey`: (optional) The secret key used to generate temporary URLs.
+
+* `accesskey`: (optional) The access key to generate temporary URLs. It is
+  used by HP Cloud Object Storage in addition to the `secretkey` parameter.
+
+The features supported by the Swift server are queried by requesting the
+`/info` URL on the server. In case the administrator disabled that feature,
+the configuration file can specify the following optional parameters:
+
+* `tempurlcontainerkey`: Specify whether to use the container secret key to
+  generate temporary URLs when set to true, or the account secret key
+  otherwise.
+
+* `tempurlmethods`: Array of HTTP methods that are supported by the TempURL
+  middleware of the Swift server. Example:
+
+      - tempurlmethods:
+         - GET
+         - PUT
+         - HEAD
+         - POST
+         - DELETE
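+
+Putting the core parameters together, a registry configuration using this
+driver might look like the following sketch (all values are placeholders):
+
+```
+storage:
+  swift:
+    username: <your-username>
+    password: <your-password>
+    authurl: https://auth.example.com/v2.0
+    container: registry
+```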

+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md new file mode 100644 index 00000000..158ad999 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md @@ -0,0 +1,65 @@ + + + +# Docker Registry Storage Driver + +This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. + +## Provided Drivers + +This storage driver package comes bundled with several drivers: + +- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. +- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. +- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket. +- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). +- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. +- [swift](storage-drivers/swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/). +- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). +- [gcs](storage-drivers/gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. + +## Storage Driver API + +The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. + +Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. + +Storage drivers are intended to be written in Go, providing compile-time +validation of the `storagedriver.StorageDriver` interface. + +## Driver Selection and Configuration + +The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. + +Storage driver factories may be registered by name using the +`factory.Register` method, and then later invoked by calling `factory.Create` +with a driver name and parameters map. If no such storage driver can be found, +`factory.Create` will return an `InvalidStorageDriverError`. + +## Driver Contribution + +### Writing new storage drivers + +To create a valid storage driver, one must implement the +`storagedriver.StorageDriver` interface and make sure to expose this driver +via the factory system. + +#### Registering + +Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase. 
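+
+As a sketch, a hypothetical driver named `mydriver` might register itself as
+follows (the import paths are those used within this repository; `New` is the
+driver's own constructor and is not shown):
+
+```go
+package mydriver
+
+import (
+    storagedriver "github.com/docker/distribution/registry/storage/driver"
+    "github.com/docker/distribution/registry/storage/driver/factory"
+)
+
+type myDriverFactory struct{}
+
+// Create builds the driver from its parameters map.
+func (f *myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+    return New(parameters) // hypothetical constructor, not shown
+}
+
+func init() {
+    factory.Register("mydriver", &myDriverFactory{})
+}
+```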
+ +## Testing + +Storage driver test suites are provided in +`storagedriver/testsuites/testsuites.go` and may be used for any storage +driver written in Go. Tests can be registered using the `RegisterSuite` +function, which runs the same set of tests for any registered drivers. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/errors.go new file mode 100644 index 00000000..77bd096e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/errors.go @@ -0,0 +1,111 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" +) + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may be set, indicating the cause of invalidity. +type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return "unverified manifest" +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct { + Digest digest.Digest +} + +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) +} + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may be set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go new file mode 100644 index 00000000..73fcc453 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go @@ -0,0 +1,37 @@ +package api + +import ( + "errors" + "net/http" + + "github.com/docker/distribution/health" +) + +var ( + updater = health.NewStatusUpdater() +) + +// DownHandler registers a manual_http_status check that always returns an error +func DownHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(errors.New("Manual Check")) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// UpHandler registers a manual_http_status check that always returns nil +func UpHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(nil) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// init sets up the two endpoints to bring the service up and down +func init() { + health.Register("manual_http_status", updater) + http.HandleFunc("/debug/health/down", DownHandler) + http.HandleFunc("/debug/health/up", UpHandler) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go new file mode 100644 index 00000000..e3c3b08d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go @@ -0,0 +1,62 @@ +package checks + +import ( + "errors" + "net" + "net/http" + "os" + "strconv" + "time" + + "github.com/docker/distribution/health" +) + +// FileChecker checks the existence of a file and returns an error +// if the file exists. +func FileChecker(f string) health.Checker { + return health.CheckFunc(func() error { + if _, err := os.Stat(f); err == nil { + return errors.New("file exists") + } + return nil + }) +} + +// HTTPChecker does a HEAD request and verifies that the HTTP status code +// returned matches statusCode. +func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { + return health.CheckFunc(func() error { + client := http.Client{ + Timeout: timeout, + } + req, err := http.NewRequest("HEAD", r, nil) + if err != nil { + return errors.New("error creating request: " + r) + } + for headerName, headerValues := range headers { + for _, headerValue := range headerValues { + req.Header.Add(headerName, headerValue) + } + } + response, err := client.Do(req) + if err != nil { + return errors.New("error while checking: " + r) + } + // Close the response body to avoid leaking connections. + defer response.Body.Close() + if response.StatusCode != statusCode { + return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) + } + return nil + }) +} + +// TCPChecker attempts to open a TCP connection.
+func TCPChecker(addr string, timeout time.Duration) health.Checker { + return health.CheckFunc(func() error { + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return errors.New("connection to " + addr + " failed") + } + conn.Close() + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go new file mode 100644 index 00000000..194b8a56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go @@ -0,0 +1,130 @@ +// Package health provides a generic health checking framework. +// The health package works expvar style. By importing the package, the debug +// server gets a "/debug/health" endpoint that returns the current +// status of the application. +// If there are no errors, "/debug/health" will return an HTTP 200 status, +// together with an empty JSON reply "{}". If there are any checks +// with errors, the JSON reply will include all the failed checks, and the +// response will have an HTTP 503 status. +// +// A Check can either be run synchronously or asynchronously. We recommend +// that most checks are registered as an asynchronous check, so a call to the +// "/debug/health" endpoint always returns immediately. This pattern is +// particularly useful for checks that verify upstream connectivity or +// database status, since they might take a long time to return/timeout. +// +// Installing +// +// To install health, just import it in your application: +// +// import "github.com/docker/distribution/health" +// +// You can also (optionally) import "health/api" that will add two convenience +// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add +// "manual" checks that allow the service to quickly be brought in/out of +// rotation. +// +// import _ "github.com/docker/distribution/health/api" +// +// # curl localhost:5001/debug/health +// {} +// # curl -X POST localhost:5001/debug/health/down +// # curl localhost:5001/debug/health +// {"manual_http_status":"Manual Check"} +// +// After importing these packages to your main application, you can start +// registering checks. +// +// Registering Checks +// +// The recommended way of registering checks is using a periodic Check. +// PeriodicChecks run on a certain schedule and asynchronously update the +// status of the check. This allows CheckStatus to return without blocking +// on an expensive check. +// +// A trivial example of a check that runs every 5 seconds and shuts down our +// server if the current minute is even could be added as follows: +// +// func currentMinuteEvenCheck() error { +// m := time.Now().Minute() +// if m%2 == 0 { +// return errors.New("Current minute is even!") +// } +// return nil +// } +// +// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5) +// +// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to +// implement the exact same check, but add a threshold of failures after which +// the check will be unhealthy. This is particularly useful for flaky Checks, +// ensuring some stability of the service when handling them. +// +// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4) +// +// The lowest-level way to interact with the health package is calling +// "Register" directly. Register allows you to pass in an arbitrary string and +// something that implements "Checker" and runs your check.
If your method +// returns nil, it is considered a healthy check; otherwise it +// will make the health check endpoint "/debug/health" start returning a 503 +// and list the specific check that failed. +// +// Assuming you wish to register a method called "currentMinuteEvenCheck() +// error" you could do so as follows: +// +// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) +// +// CheckFunc is a convenience type that implements Checker. +// +// Another way of registering a check could be by using an anonymous function +// and the convenience method RegisterFunc. An example that makes the status +// endpoint always return an error: +// +// health.RegisterFunc("my_check", func() error { +// return errors.New("This is an error!") +// }) +// +// Examples +// +// You could also use the health checker mechanism to ensure your application +// only comes up if certain conditions are met, or to allow the developer to +// take the service out of rotation immediately. An example that checks +// database connectivity and immediately takes the server out of rotation on +// err: +// +// updater = health.NewStatusUpdater() +// health.RegisterFunc("database_check", func() error { +// return updater.Check() +// }) +// +// conn, err := Connect(...) // database call here +// if err != nil { +// updater.Update(errors.New("Error connecting to the database: " + err.Error())) +// } +// +// You can also use the predefined Checkers that come included with the health +// package. First, import the checks: +// +// import "github.com/docker/distribution/health/checks" +// +// After that you can make use of any of the provided checks. An example of +// using a `FileChecker` to take the application out of rotation if a certain +// file exists can be done as follows: +// +// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) +// +// After registering the check, it is trivial to take an application out of +// rotation from the console: +// +// # curl localhost:5001/debug/health +// {} +// # touch /tmp/disable +// # curl localhost:5001/debug/health +// {"fileChecker":"file exists"} +// +// You could also test the connectivity to a downstream service by using a +// "HTTPChecker", but ensure that you only mark the test unhealthy if there +// are a minimum of two failures in a row: +// +// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt", 200, time.Second*5, nil), time.Second*5, 2)) +package health diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go new file mode 100644 index 00000000..220282dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go @@ -0,0 +1,306 @@ +package health + +import ( + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// A Registry is a collection of checks. Most applications will use the global +// registry defined in DefaultRegistry. However, unit tests may need to create +// separate registries to isolate themselves from other tests. +type Registry struct { + mu sync.RWMutex + registeredChecks map[string]Checker +} + +// NewRegistry creates a new registry. This isn't necessary for normal use of +// the package, but may be useful for unit tests so individual tests have their +// own set of checks.
+func NewRegistry() *Registry { + return &Registry{ + registeredChecks: make(map[string]Checker), + } +} + +// DefaultRegistry is the default registry where checks are registered. It is +// the registry used by the HTTP handler. +var DefaultRegistry *Registry + +// Checker is the interface for a Health Checker +type Checker interface { + // Check returns nil if the service is okay. + Check() error +} + +// CheckFunc is a convenience type to create functions that implement +// the Checker interface +type CheckFunc func() error + +// Check implements the Checker interface to allow any func() error method +// to be passed as a Checker +func (cf CheckFunc) Check() error { + return cf() +} + +// Updater implements a health check that is explicitly set. +type Updater interface { + Checker + + // Update updates the current status of the health check. + Update(status error) +} + +// updater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns from the Check() call immediately, +// without blocking on a potentially expensive check. +type updater struct { + mu sync.Mutex + status error +} + +// Check implements the Checker interface +func (u *updater) Check() error { + u.mu.Lock() + defer u.mu.Unlock() + + return u.status +} + +// Update implements the Updater interface, allowing asynchronous access to +// the status of a Checker. +func (u *updater) Update(status error) { + u.mu.Lock() + defer u.mu.Unlock() + + u.status = status +} + +// NewStatusUpdater returns a new updater +func NewStatusUpdater() Updater { + return &updater{} +} + +// thresholdUpdater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns from the Check() call immediately, +// without blocking on a potentially expensive check. +type thresholdUpdater struct { + mu sync.Mutex + status error + threshold int + count int +} + +// Check implements the Checker interface +func (tu *thresholdUpdater) Check() error { + tu.mu.Lock() + defer tu.mu.Unlock() + + if tu.count >= tu.threshold { + return tu.status + } + + return nil +} + +// Update implements the Updater interface, allowing asynchronous +// access to the status of a Checker.
+func (tu *thresholdUpdater) Update(status error) { + tu.mu.Lock() + defer tu.mu.Unlock() + + if status == nil { + tu.count = 0 + } else if tu.count < tu.threshold { + tu.count++ + } + + tu.status = status +} + +// NewThresholdStatusUpdater returns a new thresholdUpdater +func NewThresholdStatusUpdater(t int) Updater { + return &thresholdUpdater{threshold: t} +} + +// PeriodicChecker wraps an updater to provide a periodic checker +func PeriodicChecker(check Checker, period time.Duration) Checker { + u := NewStatusUpdater() + go func() { + t := time.NewTicker(period) + for { + <-t.C + u.Update(check.Check()) + } + }() + + return u +} + +// PeriodicThresholdChecker wraps an updater to provide a periodic checker that +// uses a threshold before it changes status +func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { + tu := NewThresholdStatusUpdater(threshold) + go func() { + t := time.NewTicker(period) + for { + <-t.C + tu.Update(check.Check()) + } + }() + + return tu +} + +// CheckStatus returns a map with all the current health check errors +func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type + registry.mu.RLock() + defer registry.mu.RUnlock() + statusKeys := make(map[string]string) + for k, v := range registry.registeredChecks { + err := v.Check() + if err != nil { + statusKeys[k] = err.Error() + } + } + + return statusKeys +} + +// CheckStatus returns a map with all the current health check errors from the +// default registry. +func CheckStatus() map[string]string { + return DefaultRegistry.CheckStatus() +} + +// Register associates the checker with the provided name. +func (registry *Registry) Register(name string, check Checker) { + if registry == nil { + registry = DefaultRegistry + } + registry.mu.Lock() + defer registry.mu.Unlock() + _, ok := registry.registeredChecks[name] + if ok { + panic("Check already exists: " + name) + } + registry.registeredChecks[name] = check +} + +// Register associates the checker with the provided name in the default +// registry. +func Register(name string, check Checker) { + DefaultRegistry.Register(name, check) +} + +// RegisterFunc allows the convenience of registering a checker directly from +// an arbitrary func() error. +func (registry *Registry) RegisterFunc(name string, check func() error) { + registry.Register(name, CheckFunc(check)) +} + +// RegisterFunc allows the convenience of registering a checker in the default +// registry directly from an arbitrary func() error. +func RegisterFunc(name string, check func() error) { + DefaultRegistry.RegisterFunc(name, check) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// from an arbitrary func() error. +func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + registry.Register(name, PeriodicChecker(CheckFunc(check), period)) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// in the default registry from an arbitrary func() error. +func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + DefaultRegistry.RegisterPeriodicFunc(name, period, check) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker from an arbitrary func() error. 
+func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker in the default registry from an arbitrary func() error. +func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) +} + +// StatusHandler returns a JSON blob with all the currently registered Health Checks +// and their corresponding status. +// Returns 503 if any Error status exists, 200 otherwise +func StatusHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + checks := CheckStatus() + status := http.StatusOK + + // If there is an error, return 503 + if len(checks) != 0 { + status = http.StatusServiceUnavailable + } + + statusResponse(w, r, status, checks) + } else { + http.NotFound(w, r) + } +} + +// Handler returns a handler that will return 503 response code if the health +// checks have failed. If everything is okay with the health checks, the +// handler will pass through to the provided handler. Use this handler to +// disable a web application when the health checks fail. +func Handler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks := CheckStatus() + if len(checks) != 0 { + errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. + WithDetail("health check failed: please see /debug/health")) + return + } + + handler.ServeHTTP(w, r) // pass through + }) +} + +// statusResponse completes the request with a response describing the health +// of the service. 
+func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { + p, err := json.Marshal(checks) + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) + p, err = json.Marshal(struct { + ServerError string `json:"server_error"` + }{ + ServerError: "Could not parse error message", + }) + status = http.StatusInternalServerError + + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) + return + } + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.WriteHeader(status) + if _, err := w.Write(p); err != nil { + context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) + } +} + +// init registers the global /debug/health API endpoint and creates the default registry +func init() { + DefaultRegistry = NewRegistry() + http.HandleFunc("/debug/health", StatusHandler) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 00000000..49e2b1af --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,147 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// package's version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU.
+ Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Manifests references the platform-specific image manifests. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier.
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 00000000..e9fe81be --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,278 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/libtrust" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // name is the name provided to NewConfigManifestBuilder + name string + // tag is the tag provided to NewConfigManifestBuilder + tag string + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, name, tag string, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + name: name, + tag: tag, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, errors.New("number of descriptors and number of layers in rootfs must match") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History 
section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.name, + Tag: mb.tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 00000000..160f9cd9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. 
Note + // that for schema version 1, the media type is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // package's version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. +type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. +type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data.
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returns the descriptors of this manifest's references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of all. If all is empty, marshals the inner +// contents. Applications requiring a marshaled signed manifest should use +// Payload directly, since the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. + return jsig.Signatures() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 00000000..36209e34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,92 @@ +package schema1 + +import ( + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies.
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, name, tag, architecture string) distribution.ManifestBuilder { + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. + History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 00000000..c862dd81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. 
+func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 00000000..fa8daa56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 00000000..70b006a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,74 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references the image configuration. + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references.
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 00000000..8d378e99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,125 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // package's version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returns the descriptors of this manifest's references. +func (m Manifest) References() []distribution.Descriptor { + return m.Layers + +} + +// Target returns the target of this signed manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 00000000..c57398bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,12 @@ +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifests.go b/Godeps/_workspace/src/github.com/docker/distribution/manifests.go new file mode 100644 index 00000000..aec28e97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifests.go @@ -0,0 +1,111 @@ +package distribution + +import ( + "fmt" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // The references are strictly ordered from base to head. A reference + // is anything which can be represented by a distribution.Descriptor + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the mediatype. + Payload() (mediatype string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface { + // Build creates the manifest from this builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. + References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + AppendReference(dependency Describable) error +} + +// ManifestService describes operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(ctx context.Context, dgst digest.Digest) (bool, error) + + // Get retrieves the manifest specified by the given digest + Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest + Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) + + // Delete removes the manifest specified by the given digest. Deleting + // a manifest that doesn't exist will return ErrManifestNotFound + Delete(ctx context.Context, dgst digest.Digest) error + + // Enumerate fills 'manifests' with the manifests in this service up + // to the size of 'manifests' and returns 'n' for the number of entries + // which were filled. 'last' contains an offset in the manifest set + // and can be used to resume iteration. + //Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error) +} + +// Describable is an interface for descriptors +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + mediaTypes = append(mediaTypes, t) + } + return +} + +// UnmarshalFunc implements manifest unmarshalling for a given MediaType +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc) + +// UnmarshalManifest looks up manifest unmarshal functions based on +// MediaType +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual content type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + semicolonIndex := strings.Index(ctHeader, ";") + if semicolonIndex != -1 { + mediatype = ctHeader[:semicolonIndex] + } else { + mediatype = ctHeader + } + + unmarshalFunc, ok := mappings[mediatype] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype) + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
This +// should be called from specific manifest packages to register their formats. +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) + } + mappings[mediatype] = u + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go new file mode 100644 index 00000000..efaa3ac3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go @@ -0,0 +1,160 @@ +package notifications + +import ( + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/uuid" +) + +type bridge struct { + ub URLBuilder + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink +} + +var _ Listener = &bridge{} + +// URLBuilder defines a subset of url builder to be used by the event listener. +type URLBuilder interface { + BuildManifestURL(name, tag string) (string, error) + BuildBlobURL(name string, dgst digest.Digest) (string, error) +} + +// NewBridge returns a notification listener that writes records to sink, +// using the actor and source. Any URLs populated in the events created by +// this bridge will be created using the URLBuilder. +// TODO(stevvooe): Update this to simply take a context.Context object. +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { + return &bridge{ + ub: ub, + actor: actor, + source: source, + request: request, + sink: sink, + } +} + +// NewRequestRecord builds a RequestRecord for use in NewBridge from an +// http.Request, associating it with a request id.
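UnmarshalManifest and RegisterManifestSchema above form a small register-then-dispatch registry keyed by media type, with Content-Type parameters stripped before lookup. A self-contained sketch of the same pattern (Payload, DecodeFunc, and the function names are illustrative, not the package's):

```go
package main

import (
	"fmt"
	"strings"
)

// Payload stands in for a decoded document.
type Payload struct{ Kind string }

// DecodeFunc decodes raw bytes for one media type.
type DecodeFunc func([]byte) (Payload, error)

var decoders = map[string]DecodeFunc{}

// RegisterDecoder refuses to overwrite an existing mapping, mirroring
// RegisterManifestSchema above.
func RegisterDecoder(mediatype string, d DecodeFunc) error {
	if _, ok := decoders[mediatype]; ok {
		return fmt.Errorf("decoder already registered: %s", mediatype)
	}
	decoders[mediatype] = d
	return nil
}

// Decode strips Content-Type parameters (";charset=utf-8" and the like)
// before the lookup, as UnmarshalManifest does.
func Decode(ctHeader string, raw []byte) (Payload, error) {
	mediatype := ctHeader
	if i := strings.Index(ctHeader, ";"); i != -1 {
		mediatype = ctHeader[:i]
	}
	d, ok := decoders[mediatype]
	if !ok {
		return Payload{}, fmt.Errorf("unsupported mediatype: %s", mediatype)
	}
	return d(raw)
}

func main() {
	RegisterDecoder("application/vnd.example.v2+json", func(b []byte) (Payload, error) {
		return Payload{Kind: "v2"}, nil
	})
	p, err := Decode("application/vnd.example.v2+json; charset=utf-8", nil)
	fmt.Println(p.Kind, err) // v2 <nil>
}
```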
+func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo string, sm distribution.Manifest) error { + return b.createManifestEventAndWrite(EventActionPush, repo, sm) +} + +func (b *bridge) ManifestPulled(repo string, sm distribution.Manifest) error { + return b.createManifestEventAndWrite(EventActionPull, repo, sm) +} + +func (b *bridge) ManifestDeleted(repo string, sm distribution.Manifest) error { + return b.createManifestEventAndWrite(EventActionDelete, repo, sm) +} + +func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobMounted(repo string, desc distribution.Descriptor, fromRepo string) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionDelete, repo, desc) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo string, sm distribution.Manifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestEvent(action string, repo string, sm distribution.Manifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Repository = repo + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } + + event.Target.MediaType = mt + event.Target.Length = int64(len(p)) + event.Target.Size = int64(len(p)) + event.Target.Digest = digest.FromBytes(p) + + event.Target.URL, err = b.ub.BuildManifestURL(repo, event.Target.Digest.String()) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error { + event, err := b.createBlobEvent(action, repo, desc) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) { + event := b.createEvent(action) + event.Target.Descriptor = desc + event.Target.Length = desc.Size + event.Target.Repository = repo + + var err error + event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest) + if err != nil { + return nil, err + } + + return event, nil +} + +// createEvent creates an event with actor and source populated. +func (b *bridge) createEvent(action string) *Event { + event := createEvent(action) + event.Source = b.source + event.Actor = b.actor + event.Request = b.request + + return event +} + +// createEvent returns a new event, timestamped, with the specified action. 
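createManifestEvent above derives the event target's digest and size directly from the manifest payload rather than trusting caller-supplied metadata. A dependency-free sketch of that derivation (sha256 stands in for digest.FromBytes, and the Target shape is abbreviated):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Target abbreviates an event target: size and digest are derived from the
// payload bytes themselves.
type Target struct {
	MediaType string
	Size      int64
	Digest    string
}

func targetFromPayload(mediaType string, p []byte) Target {
	return Target{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    fmt.Sprintf("sha256:%x", sha256.Sum256(p)),
	}
}

func main() {
	t := targetFromPayload("application/example.manifest+json", []byte(`{"schemaVersion":2}`))
	fmt.Printf("%s, %d bytes, %s\n", t.MediaType, t.Size, t.Digest)
}
```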
+func createEvent(action string) *Event { + return &Event{ + ID: uuid.Generate().String(), + Timestamp: time.Now(), + Action: action, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go new file mode 100644 index 00000000..dfdb111c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go @@ -0,0 +1,86 @@ +package notifications + +import ( + "net/http" + "time" +) + +// EndpointConfig covers the optional configuration parameters for an active +// endpoint. +type EndpointConfig struct { + Headers http.Header + Timeout time.Duration + Threshold int + Backoff time.Duration +} + +// defaults sets any zero-valued fields to a reasonable default. +func (ec *EndpointConfig) defaults() { + if ec.Timeout <= 0 { + ec.Timeout = time.Second + } + + if ec.Threshold <= 0 { + ec.Threshold = 10 + } + + if ec.Backoff <= 0 { + ec.Backoff = time.Second + } +} + +// Endpoint is a reliable, queued, thread-safe sink that notifies external http +// services when events are written. Writes are non-blocking and always +// succeed for callers but events may be queued internally. +type Endpoint struct { + Sink + url string + name string + + EndpointConfig + + metrics *safeMetrics +} + +// NewEndpoint returns a running endpoint, ready to receive events. +func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { + var endpoint Endpoint + endpoint.name = name + endpoint.url = url + endpoint.EndpointConfig = config + endpoint.defaults() + endpoint.metrics = newSafeMetrics() + + // Configures the in-memory queue, retry, and http pipeline. + endpoint.Sink = newHTTPSink( + endpoint.url, endpoint.Timeout, endpoint.Headers, + endpoint.metrics.httpStatusListener()) + endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) + endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) + + register(&endpoint) + return &endpoint +} + +// Name returns the name of the endpoint, generally used for debugging. +func (e *Endpoint) Name() string { + return e.name +} + +// URL returns the url of the endpoint. +func (e *Endpoint) URL() string { + return e.url +} + +// ReadMetrics populates em with metrics from the endpoint. +func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { + e.metrics.Lock() + defer e.metrics.Unlock() + + *em = e.metrics.EndpointMetrics + // The map still needs to be copied in a threadsafe manner. + em.Statuses = make(map[string]int) + for k, v := range e.metrics.Statuses { + em.Statuses[k] = v + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go new file mode 100644 index 00000000..19d6a776 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go @@ -0,0 +1,157 @@ +package notifications + +import ( + "fmt" + "time" + + "github.com/docker/distribution" +) + +// EventAction constants used in action field of Event. +const ( + EventActionPull = "pull" + EventActionPush = "push" + EventActionMount = "mount" + EventActionDelete = "delete" +) + +const ( + // EventsMediaType is the mediatype for the json event envelope. If the + // Event, ActorRecord, SourceRecord or Envelope structs change, the version + // number should be incremented.
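NewEndpoint above layers three sinks, innermost first: the raw httpSink, wrapped by newRetryingSink, wrapped by newEventQueue, so queuing and retry decisions happen before any HTTP attempt. A self-contained sketch of that wrapping shape (the concrete sinks here are stand-ins, not the package's):

```go
package main

import "fmt"

type Event struct{ Action string }

// Sink matches the two-method interface used throughout this package.
type Sink interface {
	Write(events ...Event) error
	Close() error
}

// loggingSink is a stand-in for the real httpSink.
type loggingSink struct{}

func (loggingSink) Write(events ...Event) error { fmt.Println("deliver", len(events)); return nil }
func (loggingSink) Close() error                { return nil }

// wrapSink shows the shape used by newRetryingSink and newEventQueue:
// hold the inner Sink, add behavior, delegate.
type wrapSink struct {
	inner Sink
	name  string
}

func (w wrapSink) Write(events ...Event) error {
	fmt.Println(w.name, "pass-through")
	return w.inner.Write(events...)
}
func (w wrapSink) Close() error { return w.inner.Close() }

func main() {
	// Assembled innermost first, exactly as NewEndpoint does:
	// http <- retry <- queue.
	var s Sink = loggingSink{}
	s = wrapSink{inner: s, name: "retry"}
	s = wrapSink{inner: s, name: "queue"}
	s.Write(Event{Action: "push"})
	s.Close()
}
```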
+ EventsMediaType = "application/vnd.docker.distribution.events.v1+json" + // LayerMediaType is the media type for image rootfs diffs (aka "layers") + // used by Docker. We don't expect this to change for quite a while. + layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +// Envelope defines the fields of a json event envelope message that can hold +// one or more events. +type Envelope struct { + // Events make up the contents of the envelope. Events present in a single + // envelope are not necessarily related. + Events []Event `json:"events,omitempty"` +} + +// TODO(stevvooe): The event type should be separate from the json format. It +// should be defined as an interface. Leaving as is for now since we don't +// need that at this time. If we make this change, the struct below would be +// called "EventRecord". + +// Event provides the fields required to describe a registry event. +type Event struct { + // ID provides a unique identifier for the event. + ID string `json:"id,omitempty"` + + // Timestamp is the time at which the event occurred. + Timestamp time.Time `json:"timestamp,omitempty"` + + // Action indicates what action encompasses the provided event. + Action string `json:"action,omitempty"` + + // Target uniquely describes the target of the event. + Target struct { + // TODO(stevvooe): Use http.DetectContentType for layers, maybe. + + distribution.Descriptor + + // Length in bytes of content. Same as Size field in Descriptor. + // Provided for backwards compatibility. + Length int64 `json:"length,omitempty"` + + // Repository identifies the named repository. + Repository string `json:"repository,omitempty"` + + // FromRepository identifies the named repository which a blob was mounted + // from if appropriate. + FromRepository string `json:"fromRepository,omitempty"` + + // URL provides a direct link to the content. + URL string `json:"url,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorization context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorization context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request.
+ Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. +type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. If an error is received, the caller may retry + // sending the event. The caller should cede the slice of memory to the + // sink and not modify it after calling this method. + Write(events ...Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go new file mode 100644 index 00000000..465434f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go @@ -0,0 +1,147 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: http.DefaultTransport.(*http.Transport), + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Write makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group.
+func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + defer resp.Body.Close() + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) + } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go new file mode 100644 index 00000000..21857edf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go @@ -0,0 +1,201 @@ +package notifications + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo string, sm distribution.Manifest) error + ManifestPulled(repo string, sm distribution.Manifest) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + ManifestDeleted(repo string, sm distribution.Manifest) error +} + +// BlobListener describes a listener that can respond to layer related events. 
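The headerRoundTripper at the end of http.go shallow-copies the request before touching headers, since a RoundTripper must never mutate the *http.Request it is handed. The same pattern, self-contained (injectRoundTripper is an illustrative name, not the package's type):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// injectRoundTripper adds fixed headers to every outgoing request by
// copying the request first, then merging headers into the copy.
type injectRoundTripper struct {
	next    http.RoundTripper
	headers http.Header
}

func (rt injectRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	nreq := *req
	nreq.Header = make(http.Header, len(req.Header)+len(rt.headers))
	for k, v := range req.Header {
		nreq.Header[k] = append(nreq.Header[k], v...)
	}
	for k, v := range rt.headers {
		nreq.Header[k] = append(nreq.Header[k], v...)
	}
	return rt.next.RoundTrip(&nreq)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, r.Header.Get("Authorization"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: injectRoundTripper{
		next:    http.DefaultTransport,
		headers: http.Header{"Authorization": {"Bearer demo"}},
	}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK, with the injected header seen server-side
}
```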
+type BlobListener interface { + BlobPushed(repo string, desc distribution.Descriptor) error + BlobPulled(repo string, desc distribution.Descriptor) error + BlobMounted(repo string, desc distribution.Descriptor, fromRepo string) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + BlobDeleted(repo string, desc distribution.Descriptor) error +} + +// Listener combines all repository events into a single interface. +type Listener interface { + ManifestListener + BlobListener +} + +type repositoryListener struct { + distribution.Repository + listener Listener +} + +// Listen dispatches events on the repository to the listener. +func Listen(repo distribution.Repository, listener Listener) distribution.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { + logrus.Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) 
+ + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { + logrus.Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return dgst, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...)
+ switch err := err.(type) { + case distribution.ErrBlobMounted: + if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Name(), err.Descriptor, err.From.Name()); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + } + return nil, err + } + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, + } +} + +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener +} + +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) + if err == nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + } + } + + return committed, err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go new file mode 100644 index 00000000..2a8ffcbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go @@ -0,0 +1,152 @@ +package notifications + +import ( + "expvar" + "fmt" + "net/http" + "sync" +) + +// EndpointMetrics track various actions taken by the endpoint, typically by +// number of events. The goal of this is to export it via expvar but we may find +// some other future solution to be better. +type EndpointMetrics struct { + Pending int // events pending in queue + Events int // total events incoming + Successes int // total events written successfully + Failures int // total events failed + Errors int // total events errored + Statuses map[string]int // status code histogram, per call event +} + +// safeMetrics guards the metrics implementation with a lock and provides a +// safe update function. +type safeMetrics struct { + EndpointMetrics + sync.Mutex // protects statuses map +} + +// newSafeMetrics returns safeMetrics with map allocated. +func newSafeMetrics() *safeMetrics { + var sm safeMetrics + sm.Statuses = make(map[string]int) + return &sm +} + +// httpStatusListener returns the listener for the http sink that updates the +// relevant counters. +func (sm *safeMetrics) httpStatusListener() httpStatusListener { + return &endpointMetricsHTTPStatusListener{ + safeMetrics: sm, + } +} + +// eventQueueListener returns a listener that maintains queue related counters. +func (sm *safeMetrics) eventQueueListener() eventQueueListener { + return &endpointMetricsEventQueueListener{ + safeMetrics: sm, + } +} + +// endpointMetricsHTTPStatusListener increments counters related to http sinks +// for the relevant events.
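safeMetrics above is the usual mutex-guarded counter struct, and the expvar hookup that follows reads a consistent snapshot under the same lock. A compact sketch of the pairing (counters and the published variable name are illustrative):

```go
package main

import (
	"expvar"
	"fmt"
	"sync"
)

// counters is guarded by its embedded mutex, like safeMetrics above; the
// expvar.Func below reads a consistent snapshot under the lock.
type counters struct {
	sync.Mutex
	Successes int
	Failures  int
}

func (c *counters) success(n int) { c.Lock(); c.Successes += n; c.Unlock() }
func (c *counters) failure(n int) { c.Lock(); c.Failures += n; c.Unlock() }

func main() {
	c := &counters{}
	expvar.Publish("demo_metrics", expvar.Func(func() interface{} {
		c.Lock()
		defer c.Unlock()
		return map[string]int{"successes": c.Successes, "failures": c.Failures}
	}))

	c.success(3)
	c.failure(1)
	fmt.Println(expvar.Get("demo_metrics").String())
}
```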
+type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queue's pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is the global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Set up registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. + + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", &notifications) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go new file mode 100644 index 00000000..dda4a565 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go @@ -0,0 +1,337 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints.
Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster instantiates a broadcaster over one or more sinks. The broadcaster +// behavior will be affected by the properties of the sink. Generally, the +// sink should accept all messages and deal with reliability on its own. +// EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If the updater is non- +// nil, it will be called to update pending metrics on ingress and egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...)
+ } + eq.events.PushBack(events) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing any pending events to the sink. +func (eq *eventQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return fmt.Errorf("eventqueue: already closed") + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + + return eq.sink.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *eventQueue) run() { + for { + block := eq.next() + + if block == nil { + return // nil block means event queue is closed. + } + + if err := eq.sink.Write(block...); err != nil { + logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) + } + + for _, listener := range eq.listeners { + listener.egress(block...) + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *eventQueue) next() []Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.([]Event) + eq.events.Remove(front) + + return block +} + +// retryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Internally, a circuit breaker is used to manage retries and reset. +// Concurrent calls to a retrying sink are serialized through the sink, +// meaning that if one is in-flight, another will not proceed. +type retryingSink struct { + mu sync.Mutex + sink Sink + closed bool + + // circuit breaker heuristics + failures struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + } +} + +type retryingSinkListener interface { + active(events ...Event) + retry(events ...Event) +} + +// TODO(stevvooe): We are using circuit breaking here, which actually doesn't +// make a whole lot of sense for this use case, since we always retry. Move +// this to use bounded exponential backoff. + +// newRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { + rs := &retryingSink{ + sink: sink, + } + rs.failures.threshold = threshold + rs.failures.backoff = backoff + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *retryingSink) Write(events ...Event) error { + rs.mu.Lock() + defer rs.mu.Unlock() + +retry: + + if rs.closed { + return ErrSinkClosed + } + + if !rs.proceed() { + logrus.Warnf("%v encountered too many errors, backing off", rs.sink) + rs.wait(rs.failures.backoff) + goto retry + } + + if err := rs.write(events...); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink.
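eventQueue's run/next pair above is a classic condition-variable consumer: Write signals, next blocks until data arrives or the queue closes. A stripped-down, self-contained version of the shape (without eventQueue's close-time flush handshake):

```go
package main

import (
	"fmt"
	"sync"
)

// queue mirrors eventQueue's structure: Push signals one waiter, pop blocks
// on the condition until an item arrives or the queue is closed.
type queue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  []string
	closed bool
}

func newQueue() *queue {
	q := &queue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *queue) Push(s string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, s)
	q.cond.Signal()
}

func (q *queue) Close() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.closed = true
	q.cond.Broadcast()
}

// pop returns "", false once the queue is closed and drained.
func (q *queue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 {
		if q.closed {
			return "", false
		}
		q.cond.Wait()
	}
	s := q.items[0]
	q.items = q.items[1:]
	return s, true
}

func main() {
	q := newQueue()
	done := make(chan struct{})
	go func() {
		defer close(done)
		for s, ok := q.pop(); ok; s, ok = q.pop() {
			fmt.Println("drained:", s)
		}
	}()
	q.Push("a")
	q.Push("b")
	q.Close()
	<-done
}
```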
+func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile new file mode 100644 index 00000000..1e2a8471 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:14.04 + +ENV GOLANG_VERSION 1.4rc1 +ENV GOPATH /var/cache/drone +ENV GOROOT /usr/local/go +ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin + +ENV LANG C +ENV LC_ALL C + +RUN apt-get update && apt-get install -y \ + wget ca-certificates git mercurial bzr \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ + tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ + rm go${GOLANG_VERSION}.linux-amd64.tar.gz + +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md new file mode 100644 index 00000000..eda88696 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. + +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh new file mode 100644 index 00000000..6afea8a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit new file mode 100644 index 00000000..3ee2e913 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go b/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go new file mode 100644 index 00000000..c188472a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,334 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := repository [ ":" tag ] [ "@" digest ] +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' 
hostcomponent]* [':' port-number] +// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// port-number := /[0-9]+/ +// component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +package reference + +import ( + "errors" + "fmt" + + "github.com/docker/distribution/digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag.
+type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// by which it can be referenced +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name, including hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + // TODO(dmcgowan): Provide more specific and helpful error + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest.
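The parsers and constructors above compose in the obvious way; a short usage sketch against this package's exported API as vendored here (the sample reference string is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNamed("registry.example.com:5000/library/ubuntu:14.04")
	if err != nil {
		panic(err)
	}

	// SplitHostname separates the registry endpoint from the repository path.
	hostname, remainder := reference.SplitHostname(named)
	fmt.Println(hostname)  // registry.example.com:5000
	fmt.Println(remainder) // library/ubuntu

	// A reference carrying a tag satisfies NamedTagged.
	if tagged, ok := named.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // 14.04
	}
}
```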
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return string(d) +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 00000000..a4ffe5b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,124 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allows one period, one or two underscores and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscores and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp + // and followed by an optional port. + hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + + // hostnameRegexp defines the structure of potential hostname components + // that may be part of image names.
This is purposely a subset of what is +// allowed by DNS to ensure backwards compatibility with Docker image +// names. + hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. + anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry.go new file mode 100644 index 00000000..ce5d7779 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry.go @@ -0,0 +1,71 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name string) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Name returns the name of the repository. + Name() string + + // Manifests returns a reference to this repository's manifest service, + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repository's tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships.
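Namespace.Repositories above documents a fill-slice pagination contract: each call fills up to len(repos) names and returns io.EOF once the catalog is exhausted. A hedged sketch of the consuming loop (listAll and the page size are illustrative; this compiles against the vendored interfaces but needs a concrete Namespace to do real work):

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// listAll walks a Namespace catalog page by page using the documented
// contract: each call fills up to len(page) names, 'last' resumes from
// the previous page, and io.EOF signals the end of the catalog.
func listAll(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var all []string
	page := make([]string, 50)
	last := ""
	for {
		n, err := ns.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return all, err
		}
		if n == 0 {
			return all, nil // defensive: no progress and no EOF
		}
		last = page[n-1]
	}
}

func main() {
	fmt.Println("wire listAll to a concrete Namespace implementation to use it")
}
```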
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 00000000..9a405d21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,258 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each. +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returns the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithDetail creates a new Error struct based on the passed-in info and +// sets the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human-readable representation of the error.
+func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique string key, often capitalized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human-readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the error's purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the error code identified by the given string value. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors.
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts a slice of error, ErrorCode or Error values into a +// slice of Error, then serializes the result +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr := daErr.(type) { + case ErrorCode: + err = daErr.WithDetail(nil) + case Error: + err = daErr + default: + err = ErrorCodeUnknown.WithDetail(daErr) + } + + // If the Error struct was set up and the Message field was left + // empty (meaning it is "") then grab it from the ErrorCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into a slice of +// Error or ErrorCode values +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Errors without details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Errors with details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 00000000..49a64a86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along.
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 00000000..01c34384 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,128 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used. + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint.
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 00000000..ad3da3ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1569 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. 
If not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors": [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` + + unauthorizedErrorsBody = `{ + "errors": [ + { + "code": "UNAUTHORIZED", + "message": "access to the requested resource is not authorized", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readable description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats.
+ Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. It's most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the request's purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set.
+ Required bool + + // Format specifies the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ...
+ ] +}`, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload.
If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors": [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` was invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available.
The blob content will be present in the body of the response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content.
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kick off an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob.
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload.
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via a status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding to the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body.
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via a status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry.
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine that a repository is available, not that it is unavailable.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, JSON list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch Complete", + Description: "Request an unabridged list of repositories available.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a JSON response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ], + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 00000000..cde01195 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core.
+package v2 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go new file mode 100644 index 00000000..ece52a2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go @@ -0,0 +1,136 @@ +package v2 + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +const errGroup = "registry.api.v2" + +var ( + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + // size does not match the content length. + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameUnknown is returned when the repository name is not known. + ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestUnknown is returned when the image manifest is unknown. + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag, is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestInvalid is returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors.
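+ // + // On the wire, errors with this code render in the registry's standard + // errors body; an illustrative sketch of that shape (not taken from this + // file) is: + // + // {"errors": [{"code": "MANIFEST_INVALID", "message": "manifest invalid"}]} + //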
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + about the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go new file mode 100644 index 00000000..5b80d5be --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go @@ -0,0 +1,49 @@ +package v2 + +import "github.com/gorilla/mux" + +// The following are definitions of the names under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name.
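+// +// For example, a server or client can resolve the manifest route by name; +// a usage sketch with assumed values, not upstream code: +// +// router := Router() +// route := router.GetRoute(RouteNameManifest) +// u, _ := route.URL("name", "library/alpine", "reference", "latest") +// // u.Path == "/v2/library/alpine/manifests/latest"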
+const ( + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" + RouteNameCatalog = "catalog" +) + +var allEndpoints = []string{ + RouteNameManifest, + RouteNameCatalog, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, +} + +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { + return RouterWithPrefix("") +} + +// RouterWithPrefix builds a gorilla router with a configured prefix +// on all routes. +func RouterWithPrefix(prefix string) *mux.Router { + rootRouter := mux.NewRouter() + router := rootRouter + if prefix != "" { + router = router.PathPrefix(prefix).Subrouter() + } + + router.StrictSlash(true) + + for _, descriptor := range routeDescriptors { + router.Path(descriptor.Path).Name(descriptor.Name) + } + + return rootRouter +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go new file mode 100644 index 00000000..6ba39cc9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go @@ -0,0 +1,236 @@ +package v2 + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/digest" + "github.com/gorilla/mux" +) + +// URLBuilder creates registry API urls from a single base endpoint. It can be +// used to create urls for use in a registry client or server. +// +// All urls will be created from the given base, including the api version. +// For example, if a root of "/foo/" is provided, urls generated will fall +// under "/foo/v2/...". Most applications will only provide a schema, host and +// port, such as "https://localhost:5000/". +type URLBuilder struct { + root *url.URL // url root (ie http://localhost/) + router *mux.Router +} + +// NewURLBuilder creates a URLBuilder with provided root url object. +func NewURLBuilder(root *url.URL) *URLBuilder { + return &URLBuilder{ + root: root, + router: Router(), + } +} + +// NewURLBuilderFromString works identically to NewURLBuilder except it takes +// a string argument for the root, returning an error if it is not a valid +// url. +func NewURLBuilderFromString(root string) (*URLBuilder, error) { + u, err := url.Parse(root) + if err != nil { + return nil, err + } + + return NewURLBuilder(u), nil +} + +// NewURLBuilderFromRequest uses information from an *http.Request to +// construct the root url. +func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { + var scheme string + + forwardedProto := r.Header.Get("X-Forwarded-Proto") + + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case r.TLS != nil: + scheme = "https" + case len(r.URL.Scheme) > 0: + scheme = r.URL.Scheme + default: + scheme = "http" + } + + host := r.Host + forwardedHost := r.Header.Get("X-Forwarded-Host") + if len(forwardedHost) > 0 { + // According to the Apache mod_proxy docs, X-Forwarded-Host can be a + // comma-separated list of hosts, to which each proxy appends the + // requested host. We want to grab the first from this comma-separated + // list.
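+ // For example, a forwarded header of + // "X-Forwarded-Host: registry.example.com, 10.0.0.2" (values assumed) + // would yield host == "registry.example.com".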
+ hosts := strings.SplitN(forwardedHost, ",", 2) + host = strings.TrimSpace(hosts[0]) + } + + basePath := routeDescriptorsMap[RouteNameBase].Path + + requestPath := r.URL.Path + index := strings.Index(requestPath, basePath) + + u := &url.URL{ + Scheme: scheme, + Host: host, + } + + if index > 0 { + // N.B. index+1 is important because we want to include the trailing / + u.Path = requestPath[0 : index+1] + } + + return NewURLBuilder(u) +} + +// BuildBaseURL constructs a base url for the API, typically just "/v2/". +func (ub *URLBuilder) BuildBaseURL() (string, error) { + route := ub.cloneRoute(RouteNameBase) + + baseURL, err := route.URL() + if err != nil { + return "", err + } + + return baseURL.String(), nil +} + +// BuildCatalogURL constructs a url to get a catalog of repositories. +func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameCatalog) + + catalogURL, err := route.URL() + if err != nil { + return "", err + } + + return appendValuesURL(catalogURL, values...).String(), nil +} + +// BuildTagsURL constructs a url to list the tags in the named repository. +func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { + route := ub.cloneRoute(RouteNameTags) + + tagsURL, err := route.URL("name", name) + if err != nil { + return "", err + } + + return tagsURL.String(), nil +} + +// BuildManifestURL constructs a url for the manifest identified by name and +// reference. The argument reference may be either a tag or digest. +func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { + route := ub.cloneRoute(RouteNameManifest) + + manifestURL, err := route.URL("name", name, "reference", reference) + if err != nil { + return "", err + } + + return manifestURL.String(), nil +} + +// BuildBlobURL constructs the url for the blob identified by name and dgst. +func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { + route := ub.cloneRoute(RouteNameBlob) + + layerURL, err := route.URL("name", name, "digest", dgst.String()) + if err != nil { + return "", err + } + + return layerURL.String(), nil +} + +// BuildBlobUploadURL constructs a url to begin a blob upload in the +// repository identified by name. +func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUpload) + + uploadURL, err := route.URL("name", name) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, +// including any url values. This should generally not be used by clients, as +// this url is provided by server implementations during the blob upload +// process. +func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUploadChunk) + + uploadURL, err := route.URL("name", name, "uuid", uuid) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// cloneRoute returns a clone of the named route from the router. Routes +// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute { + route := new(mux.Route) + root := new(url.URL) + + *route = *ub.router.GetRoute(name) // clone the route + *root = *ub.root + + return clonedRoute{Route: route, root: root} +} + +type clonedRoute struct { + *mux.Route + root *url.URL +} + +func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { + routeURL, err := cr.Route.URL(pairs...) + if err != nil { + return nil, err + } + + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil +} + +// appendValuesURL appends the parameters to the url. +func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. +func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 00000000..b3bb580d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,144 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. +// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge); ok { +// // Let the challenge write the response. +// challenge.SetHeaders(w) +// w.WriteHeader(http.StatusUnauthorized) +// return +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/context" +) + +// UserInfo carries information about +// an authenticated/authorized client. +type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error.
+type Challenge interface { + error + + // SetHeaders prepares the request to conduct a challenge response by + // adding an HTTP challenge header to the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns a new authorized context if the request is granted + // access, and a non-nil error otherwise. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have an "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// WithUser returns a context with the authorized user info. +func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case "auth.user": + return uic.user + case "auth.user.name": + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccessController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. +func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go new file mode 100644 index 00000000..82d3556d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -0,0 +1,102 @@ +// Package htpasswd provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// This authentication method MUST be used under TLS, as a simple token-replay attack is possible.
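+// +// A construction sketch using the generic registry from the auth package +// (realm and path values assumed): +// +// ac, err := auth.GetAccessController("htpasswd", map[string]interface{}{ +// "realm": "basic-realm", +// "path": "/etc/registry/htpasswd", +// })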
+package htpasswd + +import ( + "errors" + "fmt" + "net/http" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure is returned when authentication fails and may + // be presented to the agent. + ErrAuthenticationFailure = errors.New("authentication failure") +) + +type accessController struct { + realm string + htpasswd *htpasswd +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) + } + + f, err := os.Open(path.(string)) + if err != nil { + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + return nil, err + } + + return &accessController{realm: realm.(string), htpasswd: h}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + username, password, ok := req.BasicAuth() + if !ok { + return nil, &challenge{ + realm: ac.realm, + err: ErrInvalidCredential, + } + } + + if err := ac.htpasswd.authenticateUser(username, password); err != nil { + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) +} + +func init() { + auth.Register("htpasswd", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go new file mode 100644 index 00000000..494ad0a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -0,0 +1,80 @@ +package htpasswd + +import ( + "bufio" + "fmt" + "io" + "strings" + + "golang.org/x/crypto/bcrypt" +) + +// htpasswd holds the entries parsed from a system .htpasswd file. Only +// bcrypt hash entries are supported. +type htpasswd struct { + entries map[string][]byte // maps username to password byte slice. +} + +// newHTPasswd parses the reader and returns an htpasswd or an error.
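+// +// The reader is expected to supply one entry per line, with only bcrypt +// hashes honored; an illustrative file body: +// +// # comments and blank lines are skipped +// alice:$2y$05$... +//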
+func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err + } + + return &htpasswd{entries: entries}, nil +} + +// authenticateUser checks a given user:password credential against the +// receiving htpasswd's entries. If the check passes, nil is returned. +func (htpasswd *htpasswd) authenticateUser(username string, password string) error { + credentials, ok := htpasswd.entries[username] + if !ok { + // timing attack paranoia + bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) + + return ErrAuthenticationFailure + } + + err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) + if err != nil { + return ErrAuthenticationFailure + } + + return nil +} + +// parseHTPasswd parses the contents of htpasswd. This will read all the +// entries in the file, whether or not they are needed. An error is returned +// if any syntax errors are encountered or if the reader fails. +func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { + entries := map[string][]byte{} + scanner := bufio.NewScanner(rd) + var line int + for scanner.Scan() { + line++ // 1-based line numbering + t := strings.TrimSpace(scanner.Text()) + + if len(t) < 1 { + continue + } + + // lines that *begin* with a '#' are considered comments + if t[0] == '#' { + continue + } + + i := strings.Index(t, ":") + if i < 0 || i >= len(t) { + return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) + } + + entries[t[:i]] = []byte(t[i+1:]) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go new file mode 100644 index 00000000..2b801d94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go @@ -0,0 +1,97 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if it is present and +// non-empty. +// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. +type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist.
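+// +// A rejected request receives a header like the following (realm, service +// and scope values assumed): +// +// WWW-Authenticate: Bearer realm="test-realm",service="test-service",scope="repository:foo/bar:pull"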
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("WWW-Authenticate", header) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. +func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 00000000..5b1ff7ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,268 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. 
+var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +var _ auth.Challenge = authChallenge{} + +// Error returns the internal error string for this authChallenge. +func (ac authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetHeaders sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams()) +} + +// accessController implements the auth.AccessController interface. +type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the constructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options.
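+// +// A configuration sketch (names and paths assumed): +// +// ac, err := newAccessController(map[string]interface{}{ +// "realm": "registry-token-realm", +// "issuer": "auth.example.com", +// "service": "registry.example.com", +// "rootcertbundle": "/etc/registry/root-bundle.pem", +// })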
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. +func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 00000000..1d04f104 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. 
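+// +// For example: +// +// ss := newStringSet("pull", "push") +// ss.contains("pull") // true +// ss.keys() // []string{"pull", "push"}, in unspecified order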
+type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 00000000..166816ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,343 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." +) + +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. 
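+// +// Parsing is normally followed by verification; a sketch with assumed +// inputs: +// +// token, err := NewToken(rawToken) +// if err == nil { +// err = token.Verify(verifyOpts) +// }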
+func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Errorf("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Errorf("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. + currentUnixTime := time.Now().Unix() + if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { + log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) + return ErrInvalidToken + } + + // Verify the token signature. + if len(t.Signature) == 0 { + log.Error("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Error(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Errorf("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain one of these three fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods is tried in that order of preference until the +// signing key is found or an error is returned.
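+// +// For example, a token signed by a pre-shared trusted key might carry only +// a key ID in its JOSE header (value illustrative): +// +// {"typ": "JWT", "alg": "ES256", "kid": "STUV:WXYZ:ABCD"}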
+func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. + var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case len(rawJWK) > 0: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. 
+ x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 00000000..d7f95be4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. 
+func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go new file mode 100644 index 00000000..7d8f1d95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number. +type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the version of the API specification implemented. + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. +func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = <API type> '/' <API version> +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? +// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go new file mode 100644 index 00000000..a6ad45d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go @@ -0,0 +1,219 @@ +package auth + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// ChallengeManager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type ChallengeManager interface { + // GetChallenges returns the challenges for the given + // endpoint URL. + GetChallenges(endpoint string) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenticate headers and added to the + // URL which produced the response.
If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleChallengeManager returns an instance of +// ChallengeManager which only maps endpoints to challenges +// based on the responses which have been added to the +// manager. The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleChallengeManager() ChallengeManager { + return simpleChallengeManager{} +} + +type simpleChallengeManager map[string][]Challenge + +func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { + challenges := m[endpoint] + return challenges, nil +} + +func (m simpleChallengeManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + m[urlCopy.String()] = challenges + + return nil +} + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = <any 8-bit sequence of data> + // CHAR = <any US-ASCII character (octets 0 - 127)> + // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> + // CR = <US-ASCII CR, carriage return (13)> + // LF = <US-ASCII LF, linefeed (10)> + // SP = <US-ASCII SP, space (32)> + // HT = <US-ASCII HT, horizontal-tab (9)> + // <"> = <US-ASCII double-quote mark (34)> + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = <any OCTET except CTLs, but including LWS> + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1*<any CHAR except CTLs or separators> + // qdtext = <any TEXT except <">> + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object.
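+ // For example, a response header of (value illustrative) + // WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com" + // parses to a single Challenge with Scheme "bearer" and those two + // parameters.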
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go new file mode 100644 index 00000000..6b483c62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go @@ -0,0 +1,326 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/transport" +) + +// AuthenticationHandler is an interface for authorizing a request based on +// params from a "WWW-Authenticate" header for a single scheme. +type AuthenticationHandler interface { + // Scheme returns the scheme as expected from the "WWW-Authenticate" header. + Scheme() string + + // AuthorizeRequest adds the authorization header to a request (if needed) + // using the parameters from the "WWW-Authenticate" header. The parameter + // values depend on the scheme. + AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order; higher priority authentication +// methods should come first. The challenge manager holds a list of challenges +// for a given root API endpoint (for example "https://registry-1.docker.io/v2/").
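+// +// Typical wiring, as a sketch (credential store and prior ping response +// assumed): +// +// cm := NewSimpleChallengeManager() +// _ = cm.AddResponse(pingResp) +// modifier := NewAuthorizer(cm, NewTokenHandler(nil, creds, "library/alpine", "pull"))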
+func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges ChallengeManager + handlers []AuthenticationHandler + transport http.RoundTripper +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + v2Root := strings.Index(req.URL.Path, "/v2/") + if v2Root == -1 { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: req.URL.Path[:v2Root+4], + } + + pingEndpoint := ping.String() + + challenges, err := ea.challenges.GetChallenges(pingEndpoint) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, challenge := range challenges { + if challenge.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + header http.Header + creds CredentialStore + scope tokenScope + transport http.RoundTripper + clock clock + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time + + additionalScopes map[string]struct{} +} + +// tokenScope represents the scope at which a token will be requested. +// This represents a specific action on a registry resource. +type tokenScope struct { + Resource string + Scope string + Actions []string +} + +func (ts tokenScope) String() string { + return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +} + +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + +// NewTokenHandler creates a new AuthenticationHandler which supports +// fetching tokens from a remote token server. +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + return newTokenHandler(transport, creds, realClock{}, scope, actions...) +} + +// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing.
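+//
+// A test could pin time with a fixed clock; fakeClock here is a
+// hypothetical test helper, not part of this package:
+//
+//    type fakeClock struct{ t time.Time }
+//
+//    func (fc fakeClock) Now() time.Time { return fc.t }
+//
+//    h := newTokenHandler(nil, nil, fakeClock{t: time.Unix(0, 0)}, "library/busybox", "pull")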
+func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { + return &tokenHandler{ + transport: transport, + creds: creds, + clock: c, + scope: tokenScope{ + Resource: "repository", + Scope: scope, + Actions: actions, + }, + additionalScopes: map[string]struct{}{}, + } +} + +func (th *tokenHandler) client() *http.Client { + return &http.Client{ + Transport: th.transport, + Timeout: 15 * time.Second, + } +} + +func (th *tokenHandler) Scheme() string { + return "bearer" +} + +func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, tokenScope{ + Resource: "repository", + Scope: fromParam, + Actions: []string{"pull"}, + }.String()) + } + if err := th.refreshToken(params, additionalScopes...); err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) + + return nil +} + +func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + var addedScopes bool + for _, scope := range additionalScopes { + if _, ok := th.additionalScopes[scope]; !ok { + th.additionalScopes[scope] = struct{}{} + addedScopes = true + } + } + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + tr, err := th.fetchToken(params) + if err != nil { + return err + } + th.tokenCache = tr.Token + th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) + } + + return nil +} + +type tokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` +} + +func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { + //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) + realm, ok := params["realm"] + if !ok { + return nil, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme + + realmURL, err := url.Parse(realm) + if err != nil { + return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return nil, err + } + + reqParams := req.URL.Query() + service := params["service"] + scope := th.scope.String() + + if service != "" { + reqParams.Add("service", service) + } + + for _, scopeField := range strings.Fields(scope) { + reqParams.Add("scope", scopeField) + } + + for scope := range th.additionalScopes { + reqParams.Add("scope", scope) + } + + if th.creds != nil { + username, password := th.creds.Basic(realmURL) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := th.client().Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return nil, err + } + + decoder := json.NewDecoder(resp.Body) + + tr := new(tokenResponse) + if err = decoder.Decode(tr); err != nil { + return nil, fmt.Errorf("unable to decode token response: %s", err) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. 
Canonicalize `access_token` by copying + it into `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return nil, errors.New("authorization server did not include a token in the response") + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now() + } + + return tr, nil +} + +type basicHandler struct { + creds CredentialStore +} + +// NewBasicHandler creates a new authentication handler which adds +// basic authentication credentials to a request. +func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return errors.New("no basic auth credentials") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 00000000..21a018dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,176 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header.
+ offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hbu.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset += int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + hbu.offset = newOffset + + return hbu.offset, nil +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return 
err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 00000000..8e3cb108 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,72 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry API call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + Response: body, + } + } + return errors +} + +// HandleErrorResponse returns the error parsed from an HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError is returned for response codes outside of the +// expected range. +func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode == 401 { + err := parseHTTPErrorResponse(resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return parseHTTPErrorResponse(resp.Body) + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive).
+func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 00000000..d6521211 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,747 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// NewRegistry creates a registry namespace which can be used to get a listing of repositories. +func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + } + + return &registry{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type registry struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there +// are no more entries. +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL.
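+//
+// A usage sketch (the registry URL and repository name are illustrative;
+// a nil transport falls back to http.DefaultTransport):
+//
+//    repo, err := client.NewRepository(ctx, "library/busybox", "https://registry-1.docker.io", nil)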
+func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + if _, err := reference.ParseNamed(name); err != nil { + return nil, err + } + + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name string +} + +func (r *repository) Name() string { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.Name(), + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.Name(), + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.Name(), + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Name(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name string +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = tagsResponse.Tags + return tags, nil + } + return tags, HandleErrorResponse(resp) +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the 
tag. If the registry doesn't support HEADing +// a manifest, fall back to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + u, err := t.ub.BuildManifestURL(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + var attempts int + resp, err := t.client.Head(u) + +check: + if err != nil { + return distribution.Descriptor{}, err + } + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + case resp.StatusCode == http.StatusMethodNotAllowed: + resp, err = t.client.Get(u) + attempts++ + if attempts > 1 { + return distribution.Descriptor{}, err + } + goto check + default: + return distribution.Descriptor{}, HandleErrorResponse(resp) + } +} + +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { + panic("not implemented") +} + +type manifests struct { + name string + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + if SuccessStatus(resp.StatusCode) { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } + return false, HandleErrorResponse(resp) +} + +// AddEtagToTag allows a client to supply an eTag to Get which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and an ErrManifestNotModified error will be returned. The etag is +// automatically quoted when added to this map.
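+//
+// A conditional fetch sketch (ms is a ManifestService obtained from
+// Repository.Manifests, and etag is a previously saved value; the digest
+// argument may be empty when fetching by tag, and WithTag is defined
+// below):
+//
+//    m, err := ms.Get(ctx, "", client.WithTag("latest"), client.AddEtagToTag("latest", etag))
+//    if err == distribution.ErrManifestNotModified {
+//        // the cached manifest is still current
+//    }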
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { + return etagOption{tag, etag} +} + +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil + } + return fmt.Errorf("etag option is a client-only option") +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + + var tag string + for _, option := range options { + if opt, ok := option.(withTagOption); ok { + tag = opt.tag + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + var ref string + if tag != "" { + ref = tag + } else { + ref = dgst.String() + } + + u, err := ms.ub.BuildManifestURL(ms.name, ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[ref]; ok { + req.Header.Set("If-None-Match", ms.etags[ref]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// WithTag allows a tag to be passed into Put which enables the client +// to build a correct URL. +func WithTag(tag string) distribution.ManifestServiceOption { + return withTagOption{tag} +} + +type withTagOption struct{ tag string } + +func (o withTagOption) Apply(m distribution.ManifestService) error { + if _, ok := m.(*manifests); ok { + return nil + } + return fmt.Errorf("withTagOption is a client-only option") +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. This state is written and read under a lock.
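+//
+// A put-by-tag sketch (the manifest m is assumed to exist):
+//
+//    dgst, err := ms.Put(ctx, m, client.WithTag("latest"))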
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var tag string + + for _, option := range options { + if opt, ok := option.(withTagOption); ok { + tag = opt.tag + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return "", err + } + + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name string + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.New() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return 
distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) + if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name string + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + u, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + 
} else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 00000000..b27b6c23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,203 @@ +package transport + +import ( + "bufio" + "errors" + "fmt" + "io" + "net/http" + "os" +) + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset, +// a "Range" header will be added which sets the offset. +// TODO(dmcgowan): Move this into a separate utility package +func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + errorHandler: errorHandler, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + // errorHandler creates an error from an unsuccessful HTTP response. + // This allows the error to be created with the HTTP response body + // without leaking the body through a returned error. + errorHandler func(*http.Response) error + + size int64 + + // rc is the remote read closer. + rc io.ReadCloser + // brd is a buffer for internal buffered io. + brd *bufio.Reader + // readerOffset tracks the offset as of the last read. + readerOffset int64 + // seekOffset allows Seek to override the offset. Seek changes + // seekOffset instead of changing readerOffset directly so that + // connection resets can be delayed and possibly avoided if the + // seek is undone (i.e. seeking to the end and then back to the + // beginning). + seekOffset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + // If we seeked to a different position, we need to reset the + // connection. This logic is here instead of Seek so that if + // a seek is undone before the next read, the connection doesn't + // need to be closed and reopened. A common example of this is + // seeking to the end to determine the length, and then seeking + // back to the original position.
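+	// For example, a hypothetical caller's length probe
+	//
+	//	end, _ := hrs.Seek(0, os.SEEK_END)
+	//	hrs.Seek(0, os.SEEK_SET)
+	//
+	// leaves seekOffset equal to readerOffset again (assuming the reader
+	// started at offset zero), so the next Read needs no reset.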
+ if hrs.readerOffset != hrs.seekOffset { + hrs.reset() + } + + hrs.readerOffset = hrs.seekOffset + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.seekOffset += int64(n) + hrs.readerOffset += int64(n) + + // Simulate io.EOF error if we reach filesize. + if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { + err = io.EOF + } + + return n, err +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + _, err := hrs.reader() + if err != nil { + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + hrs.brd = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.brd, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // TODO(stevvooe): Get this working correctly. + + // If we are at a different offset, issue a range request from there. + req.Header.Add("Range", "1-") + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + hrs.rc = resp.Body + if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + if hrs.brd == nil { + hrs.brd = bufio.NewReader(hrs.rc) + } else { + hrs.brd.Reset(hrs.rc) + } + + return hrs.brd, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 00000000..30e45fab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an in-place +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request.
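+//
+// For example, attaching a static User-Agent to every request (the
+// header value is illustrative):
+//
+//    hdr := http.Header{"User-Agent": []string{"skopeo/0.1"}}
+//    rt := transport.NewTransport(nil, transport.NewHeaderRequestModifier(hdr))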
+func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request. +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or the token is expired, it +// tries to refresh/fetch a new token. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go new file mode 100644 index 00000000..a1ba7f3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go @@ -0,0 +1,2 @@ +// Package registry provides the main entrypoints for running a registry.
+package registry diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go new file mode 100644 index 00000000..23225493 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go @@ -0,0 +1,964 @@ +package handlers + +import ( + cryptorand "crypto/rand" + "expvar" + "fmt" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/health/checks" + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/libtrust" + "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + +// defaultCheckInterval is the default time in between health checks +const defaultCheckInterval = 10 * time.Second + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + context.Context + + Config *configuration.Configuration + + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application + + // httpHost is a parsed representation of the http.host parameter from + // the configuration. Only the Scheme and Host fields are used. + httpHost url.URL + + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } + + redis *redis.Pool + + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. + trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache + isCache bool + + // readOnly is true if the registry is in a read-only maintenance mode + readOnly bool +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. 
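+//
+// A serving sketch (configuration loading is elided and the listen
+// address is illustrative; App implements http.Handler via ServeHTTP):
+//
+//    app := handlers.NewApp(ctx, config)
+//    http.ListenAndServe(":5000", app)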
+func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { + app := &App{ + Config: configuration, + Context: ctx, + router: v2.RouterWithPrefix(configuration.HTTP.Prefix), + isCache: configuration.Proxy.RemoteURL != "", + } + + // Register the handler dispatchers. + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + + var err error + app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := configuration.Storage["maintenance"]; ok { + if v, ok := mc["uploadpurging"]; ok { + purgeConfig, ok = v.(map[interface{}]interface{}) + if !ok { + panic("uploadpurging config key must contain additional keys") + } + } + if v, ok := mc["readonly"]; ok { + readOnly, ok := v.(map[interface{}]interface{}) + if !ok { + panic("readonly config key must contain additional keys") + } + if readOnlyEnabled, ok := readOnly["enabled"]; ok { + app.readOnly, ok = readOnlyEnabled.(bool) + if !ok { + panic("readonly's enabled config key must have a boolean value") + } + } + } + } + + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) + + app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) + if err != nil { + panic(err) + } + + app.configureSecret(configuration) + app.configureEvents(configuration) + app.configureRedis(configuration) + app.configureLogHook(configuration) + + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. 
+ app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + + if configuration.HTTP.Host != "" { + u, err := url.Parse(configuration.HTTP.Host) + if err != nil { + panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) + } + app.httpHost = *u + } + + options := []storage.RegistryOption{} + + if app.isCache { + options = append(options, storage.DisableDigestResumption) + } + + // configure deletion + if d, ok := configuration.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok := e.(bool); ok && deleteEnabled { + options = append(options, storage.EnableDelete) + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := configuration.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + } + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } else { + options = append(options, storage.EnableRedirect) + } + + // configure storage caches + if cc, ok := configuration.Storage["cache"]; ok { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { + case "redis": + if app.redis == nil { + panic("redis configuration required to use for layerinfo cache") + } + cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") + case "inmemory": + cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") + default: + if v != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) + } + } + } + + if app.registry == nil { + // configure the registry if no cache section is available. + app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) 
+ if err != nil { + panic("could not create registry: " + err.Error()) + } + } + + app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) + if err != nil { + panic(err) + } + + authType := configuration.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + } + + // configure as a pull through cache + if configuration.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) + } + + return app +} + +// RegisterHealthChecks is an awful hack to defer health check registration +// control to callers. This should only ever be called once per registry +// process, typically in a main function. The correct way would be to register +// health checks outside of app, since multiple apps may exist in the same +// process. Because the configuration and app are tightly coupled, +// implementing this properly will require a refactor. This method may panic +// if called twice in the same process. +func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { + if len(healthRegistries) > 1 { + panic("RegisterHealthChecks called with more than one registry") + } + healthRegistry := health.DefaultRegistry + if len(healthRegistries) == 1 { + healthRegistry = healthRegistries[0] + } + + if app.Config.Health.StorageDriver.Enabled { + interval := app.Config.Health.StorageDriver.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + storageDriverCheck := func() error { + _, err := app.driver.List(app, "/") // "/" should always exist + return err // any error will be treated as failure + } + + if app.Config.Health.StorageDriver.Threshold != 0 { + healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + } else { + healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + } + } + + for _, fileChecker := range app.Config.Health.FileCheckers { + interval := fileChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + } + + for _, httpChecker := range app.Config.Health.HTTPCheckers { + interval := httpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + statusCode := httpChecker.StatusCode + if statusCode == 0 { + statusCode = 200 + } + + checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) + + if httpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) + } else {
+ ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) + } + } + + for _, tcpChecker := range app.Config.Health.TCPCheckers { + interval := tcpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) + + if tcpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) + } + } +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// configureEvents prepares the event sink for action. +func (app *App) configureEvents(configuration *configuration.Configuration) { + // Configure all of the endpoint sinks. + var sinks []notifications.Sink + for _, endpoint := range configuration.Notifications.Endpoints { + if endpoint.Disabled { + ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) + continue + } + + ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) + endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ + Timeout: endpoint.Timeout, + Threshold: endpoint.Threshold, + Backoff: endpoint.Backoff, + Headers: endpoint.Headers, + }) + + sinks = append(sinks, endpoint) + } + + // NOTE(stevvooe): Moving to a new queueing implementation is as easy as + // replacing broadcaster with a rabbitmq implementation. It's recommended + // that the registry instances also act as the workers to keep deployment + // simple. + app.events.sink = notifications.NewBroadcaster(sinks...) + + // Populate registry event source + hostname, err := os.Hostname() + if err != nil { + hostname = configuration.HTTP.Addr + } else { + // try to pick the port off the config + _, port, err := net.SplitHostPort(configuration.HTTP.Addr) + if err == nil { + hostname = net.JoinHostPort(hostname, port) + } + } + + app.events.source = notifications.SourceRecord{ + Addr: hostname, + InstanceID: ctxu.GetStringValue(app, "instance.id"), + } +} + +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
+ ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) + + done := func(err error) { + logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", + ctxu.Since(ctx, "redis.connect.startedat")) + if err != nil { + logger.Errorf("redis: error connecting: %v", err) + } else { + logger.Infof("redis: connect %v", configuration.Redis.Addr) + } + } + + conn, err := redis.DialTimeout("tcp", + configuration.Redis.Addr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) + if err != nil { + ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + configuration.Redis.Addr, err) + done(err) + return nil, err + } + + // authorize the connection + if configuration.Redis.Password != "" { + if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + // select the database to use + if configuration.Redis.DB != 0 { + if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + done(nil) + return conn, nil + }, + MaxIdle: configuration.Redis.Pool.MaxIdle, + MaxActive: configuration.Redis.Pool.MaxActive, + IdleTimeout: configuration.Redis.Pool.IdleTimeout, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + // TODO(stevvooe): We can probably do something more interesting + // here with the health package. + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not available, proceed without cache. + } + + app.redis = pool + + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) +} + +// configureLogHook prepares logging hook parameters. +func (app *App) configureLogHook(configuration *configuration.Configuration) { + entry, ok := ctxu.GetLogger(app).(*log.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } +} + +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer.
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() // ensure that request body is always closed. + + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + + defer func() { + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + + // Set a header with the Docker Distribution API Version for all responses. + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + app.router.ServeHTTP(w, r) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerName, headerValues := range app.Config.HTTP.Headers { + for _, value := range headerValues { + w.Header().Add(headerName, value) + } + } + + context := app.context(w, r) + + if err := app.authorized(w, r, context); err != nil { + ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) + return + } + + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + + if app.nameRequired(r) { + repository, err := app.registry.Repository(context, getName(context)) + + if err != nil { + ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) + case distribution.ErrRepositoryNameInvalid: + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + } + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + + // assign and decorate the authorized repository with an event bridge. 
+ context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + + context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) + if err != nil { + ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + } + + dispatch(context, r).ServeHTTP(w, r) + // Automated error response handling here. Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). + if context.Errors.Len() > 0 { + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + + app.logError(context, context.Errors) + } + }) +} + +func (app *App) logError(context context.Context, errors errcode.Errors) { + for _, e1 := range errors { + var c ctxu.Context + + switch e := e1.(type) { + case errcode.Error: + c = ctxu.WithValue(context, "err.code", e.Code) + c = ctxu.WithValue(c, "err.message", e.Code.Message()) + c = ctxu.WithValue(c, "err.detail", e.Detail) + case errcode.ErrorCode: + c = ctxu.WithValue(context, "err.code", e) + c = ctxu.WithValue(c, "err.message", e.Message()) + default: + // just a normal Go 'error' + c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, "err.message", e1.Error()) + } + + c = ctxu.WithLogger(c, ctxu.GetLogger(c, + "err.code", + "err.message", + "err.detail")) + ctxu.GetResponseLogger(c).Errorf("response completed with error") + } +} + +// context constructs the context object for the application. This should only +// be called once per request. +func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { + ctx := defaultContextManager.context(app, w, r) + ctx = ctxu.WithVars(ctx, r) + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, + "vars.name", + "vars.reference", + "vars.digest", + "vars.uuid")) + + context := &Context{ + App: app, + Context: ctx, + } + + if app.httpHost.Scheme != "" && app.httpHost.Host != "" { + // A "host" item in the configuration takes precedence over + // X-Forwarded-Proto and X-Forwarded-Host headers, and the + // hostname in the request. + context.urlBuilder = v2.NewURLBuilder(&app.httpHost) + } else { + context.urlBuilder = v2.NewURLBuilderFromRequest(r) + } + + return context +} + +// authorized checks if the request can proceed with access to the requested +// repository. If it succeeds, the context may access the requested +// repository. An error will be returned if access is not available. +func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { + ctxu.GetLogger(context).Debug("authorizing request") + repo := getName(context) + + if app.accessController == nil { + return nil // access controller is not enabled. + } + + var accessRecords []auth.Access + + if repo != "" { + accessRecords = appendAccessRecords(accessRecords, r.Method, repo) + if fromRepo := r.FormValue("from"); fromRepo != "" { + // mounting a blob from one repository to another requires pull (GET) + // access to the source repository.
+ accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) + } + } else { + // Only allow the name not to be set on the base route. + if app.nameRequired(r) { + // For this to be properly secured, repo must always be set for a + // resource that may make a modification. The only condition under + // which name is not set and we still allow access is when the + // base route is accessed. This section prevents us from making + // that mistake elsewhere in the code, allowing any operation to + // proceed. + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return fmt.Errorf("forbidden: no repository name") + } + accessRecords = appendCatalogAccessRecord(accessRecords, r) + } + + ctx, err := app.accessController.Authorized(context.Context, accessRecords...) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + // Add the appropriate WWW-Authenticate header + err.SetHeaders(w) + + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + default: + // This condition is a potential security problem either in + // the configuration or whatever is backing the access + // controller. Just return a bad request with no information + // to avoid exposure. The request should not proceed. + ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) + w.WriteHeader(http.StatusBadRequest) + } + + return err + } + + // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context + // should be replaced by another, rather than replacing the context on a + // mutable object. + context.Context = ctx + return nil +} + +// eventBridge returns a bridge for the current request, configured with the +// correct actor and source. +func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { + actor := notifications.ActorRecord{ + Name: getUserName(ctx, r), + } + request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) + + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) +} + +// nameRequired returns true if the route requires a name. +func (app *App) nameRequired(r *http.Request) bool { + route := mux.CurrentRoute(r) + if route == nil { + return true + } + routeName := route.GetName() + return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog +} + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. +func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} + +// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
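// NOTE (editorial sketch, not part of the vendored patch): appendAccessRecords
// below maps HTTP methods to registry auth actions. The mapping is easiest to
// see as a table; this self-contained sketch mirrors the switch with plain
// strings standing in for auth.Access records and is illustrative only.
package main

import "fmt"

func actionsForMethod(method string) []string {
	switch method {
	case "GET", "HEAD":
		return []string{"pull"} // reads require pull access
	case "POST", "PUT", "PATCH":
		return []string{"pull", "push"} // writes require both pull and push
	case "DELETE":
		return []string{"*"} // deletes require full admin rights
	default:
		return nil // unknown methods grant nothing
	}
}

func main() {
	for _, m := range []string{"GET", "HEAD", "POST", "PUT", "PATCH", "DELETE"} {
		fmt.Printf("%-6s -> %v\n", m, actionsForMethod(m))
	}
}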
+func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { + resource := auth.Resource{ + Type: "repository", + Name: repo, + } + + switch method { + case "GET", "HEAD": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. + records = append(records, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return records +} + +// Add the access record for the catalog if it's our current route +func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { + route := mux.CurrentRoute(r) + if route == nil { + return accessRecords + } + routeName := route.GetName() + + if routeName == v2.RouteNameCatalog { + resource := auth.Resource{ + Type: "registry", + Name: "catalog", + } + + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return accessRecords +} + +// applyRegistryMiddleware wraps a registry instance with the configured middlewares +func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { + for _, mw := range middlewares { + rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) + if err != nil { + return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) + } + registry = rmw + } + return registry, nil +} + +// applyRepoMiddleware wraps a repository with the configured middlewares +func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { + for _, mw := range middlewares { + rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) + if err != nil { + return nil, err + } + repository = rmw + } + return repository, nil +} + +// applyStorageMiddleware wraps a storage driver with the configured middlewares +func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { + for _, mw := range middlewares { + smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) + if err != nil { + return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) + } + driver = smw + } + return driver, nil +} + +// uploadPurgeDefaultConfig provides a default configuration for upload +// purging to be used in the absence of configuration in the +// configuration file +func uploadPurgeDefaultConfig() map[interface{}]interface{} { + config := map[interface{}]interface{}{} + config["enabled"] = true + config["age"] = "168h" + config["interval"] = "24h" + config["dryrun"] = false + return config +} + +func badPurgeUploadConfig(reason string) { + panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) +} + +// startUploadPurger schedules a goroutine which will periodically +// check upload directories for old files and delete them +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { + if config["enabled"] == false { + return + } + + var purgeAgeDuration time.Duration + var err error + purgeAge, ok := config["age"] + if ok { + ageStr,
ok := purgeAge.(string) + if !ok { + badPurgeUploadConfig("age is not a string") + } + purgeAgeDuration, err = time.ParseDuration(ageStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) + } + } else { + badPurgeUploadConfig("age missing") + } + + var intervalDuration time.Duration + interval, ok := config["interval"] + if ok { + intervalStr, ok := interval.(string) + if !ok { + badPurgeUploadConfig("interval is not a string") + } + + intervalDuration, err = time.ParseDuration(intervalStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) + } + } else { + badPurgeUploadConfig("interval missing") + } + + var dryRunBool bool + dryRun, ok := config["dryrun"] + if ok { + dryRunBool, ok = dryRun.(bool) + if !ok { + badPurgeUploadConfig("cannot parse dryrun") + } + } else { + badPurgeUploadConfig("dryrun missing") + } + + go func() { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + log.Infof("Starting upload purge in %s", intervalDuration) + time.Sleep(intervalDuration) + } + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go new file mode 100644 index 00000000..8727a3cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go @@ -0,0 +1,11 @@ +// +build go1.4 + +package handlers + +import ( + "net/http" +) + +func basicAuth(r *http.Request) (username, password string, ok bool) { + return r.BasicAuth() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go new file mode 100644 index 00000000..6cf10a25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go @@ -0,0 +1,41 @@ +// +build !go1.4 + +package handlers + +import ( + "encoding/base64" + "net/http" + "strings" +) + +// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we +// can compile on go1.3 and earlier. + +// BasicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBasicAuth(auth) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). 
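// NOTE (editorial sketch, not part of the vendored patch): the example in the
// doc comment above can be reproduced with the standard library; parseBasicAuth
// below simply undoes this encoding.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Per RFC 2617, the credential is base64("user:password") behind a "Basic " prefix.
	header := "Basic " + base64.StdEncoding.EncodeToString([]byte("Aladdin:open sesame"))
	fmt.Println(header) // Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==

	decoded, _ := base64.StdEncoding.DecodeString(header[len("Basic "):])
	fmt.Println(string(decoded)) // Aladdin:open sesame
}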
+func parseBasicAuth(auth string) (username, password string, ok bool) { + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go new file mode 100644 index 00000000..fb250acd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go @@ -0,0 +1,99 @@ +package handlers + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobDispatcher uses the request context to build a blobHandler. +func blobDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := getDigest(ctx) + if err != nil { + // A missing digest (errDigestNotAvailable) and an unparseable digest + // are reported identically, so a single handler covers both cases. + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + blobHandler := &blobHandler{ + Context: ctx, + Digest: dgst, + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + } + + if !ctx.readOnly { + mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) + } + + return mhandler +} + +// blobHandler serves http blob requests. +type blobHandler struct { + *Context + + Digest digest.Digest +} + +// GetBlob fetches the binary data from backend storage and returns it in the +// response.
+func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrUnsupported: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return + default: + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return + } + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go new file mode 100644 index 00000000..1e3bff95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go @@ -0,0 +1,385 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + "os" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" + "github.com/gorilla/handlers" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context. 
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { + buh := &blobUploadHandler{ + Context: ctx, + UUID: getUploadUUID(ctx), + } + + handler := handlers.MethodHandler{ + "GET": http.HandlerFunc(buh.GetUploadStatus), + "HEAD": http.HandlerFunc(buh.GetUploadStatus), + } + + if !ctx.readOnly { + handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) + handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) + handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) + handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) + } + + if buh.UUID != "" { + state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + buh.State = state + + if state.Name != ctx.Repository.Name() { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + if state.UUID != buh.UUID { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + blobs := ctx.Repository.Blobs(buh) + upload, err := blobs.Resume(buh, buh.UUID) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) + if err == distribution.ErrBlobUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + }) + } + buh.Upload = upload + + if state.Offset > 0 { + // Seek the blob upload to the correct spot if it's non-zero. + // These error conditions should be rare and demonstrate real + // problems. We basically cancel the upload and tell the client to + // start over. + if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } else if nn != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } + } + + return closeResources(handler, buh.Upload) + } + + return handler +} + +// blobUploadHandler handles the http blob upload process. +type blobUploadHandler struct { + *Context + + // UUID identifies the upload instance for the current request. Using UUID + // to key blob writers since this implementation uses UUIDs.
+ UUID string + + Upload distribution.BlobWriter + + State blobUploadState +} + +// StartBlobUpload begins the blob upload process and allocates a server-side +// blob writer session, optionally mounting the blob from a separate repository. +func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + var options []distribution.BlobCreateOption + + fromRepo := r.FormValue("from") + mountDigest := r.FormValue("mount") + + if mountDigest != "" && fromRepo != "" { + opt, err := buh.createBlobMountOption(fromRepo, mountDigest) + if opt != nil && err == nil { + options = append(options, opt) + } + } + + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh, options...) + + if err != nil { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + buh.Upload = upload + defer buh.Upload.Close() + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. 
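// NOTE (editorial sketch, not part of the vendored patch): the handlers below are
// the server side of the V2 "monolithic" upload: the client POSTs to
// /v2/<name>/blobs/uploads/ to open a session, then PUTs the data to the returned
// Location with a digest parameter. A minimal client-side sketch; the registry
// address and repository name are placeholders.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
)

func main() {
	blob := []byte("example layer bytes")
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))

	// Open an upload session; Location carries the upload URL including the _state token.
	resp, err := http.Post("http://localhost:5000/v2/myrepo/blobs/uploads/", "", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	loc := resp.Header.Get("Location")

	// Complete the upload in a single PUT; the server verifies the digest before linking.
	// Location already has a query string (_state), so digest is appended with '&'.
	req, _ := http.NewRequest("PUT", loc+"&digest="+dgst, bytes.NewReader(blob))
	req.Header.Set("Content-Type", "application/octet-stream")
	if resp, err = http.DefaultClient.Do(req); err == nil {
		fmt.Println(resp.Status) // expect "201 Created" with Docker-Content-Digest set
		resp.Body.Close()
	}
}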
+func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // bad digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) + return + } + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the length and mediatype. For now, we can let the + // backend take care of this. + }) + + if err != nil { + switch err := err.(type) { + case distribution.ErrBlobInvalidDigest: + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + default: + switch err { + case distribution.ErrUnsupported: + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + default: + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } + + // Clean up the backend blob data if there was an error. + if err := buh.Upload.Cancel(buh); err != nil { + // If the cleanup fails, all we can do is observe and report. + ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + } + + return + } + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// CancelBlobUpload cancels an in-progress upload of a blob. +func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + if err := buh.Upload.Cancel(buh); err != nil { + ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + w.WriteHeader(http.StatusNoContent) +} + +// blobUploadResponse provides a standard request for uploading blobs and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. The fresh argument is used to ensure that new blob +// uploads always start at a 0 offset. This allows disabling resumable push by +// always returning a 0 offset on check status. +func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { + + var offset int64 + if !fresh { + var err error + offset, err = buh.Upload.Seek(0, os.SEEK_CUR) + if err != nil { + ctxu.GetLogger(buh).Errorf("unable to get current offset of blob upload: %v", err) + return err + } + } + + // TODO(stevvooe): Need a better way to manage the upload state automatically.
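// NOTE (editorial sketch, not part of the vendored patch): the _state token built
// just below is base64url(HMAC-SHA256(message) || message), exactly the layout
// implemented by hmacKey.packUploadState/unpackUploadState in hmac.go later in
// this patch. A stand-alone round-trip of the same scheme:
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func pack(secret, message []byte) string {
	mac := hmac.New(sha256.New, secret)
	mac.Write(message)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), message...))
}

func unpack(secret []byte, token string) ([]byte, error) {
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return nil, err
	}
	mac := hmac.New(sha256.New, secret)
	if len(raw) < mac.Size() {
		return nil, fmt.Errorf("invalid token")
	}
	mac.Write(raw[mac.Size():])
	if !hmac.Equal(mac.Sum(nil), raw[:mac.Size()]) {
		return nil, fmt.Errorf("invalid token") // tampered, truncated, or wrong secret
	}
	return raw[mac.Size():], nil
}

func main() {
	secret := []byte("http-secret")
	token := pack(secret, []byte(`{"UUID":"abc","Offset":42}`))
	msg, err := unpack(secret, token)
	fmt.Println(string(msg), err)
}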
+ buh.State.Name = buh.Repository.Name() + buh.State.UUID = buh.Upload.ID() + buh.State.Offset = offset + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Name(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := offset + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} + +// createBlobMountOption creates a blob-mount option from the fromRepo and +// mountDigest request parameters. The returned option is passed to +// Blobs.Create to attempt mounting the blob from the source repository. +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { + dgst, err := digest.ParseDigest(mountDigest) + if err != nil { + return nil, err + } + + ref, err := reference.ParseNamed(fromRepo) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err + } + + return storage.WithMountFrom(canonical), nil +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go new file mode 100644 index 00000000..6ec1fe55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go @@ -0,0 +1,95 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + if err == io.EOF { + moreEntries = false + } else if err != nil { + ch.Errors = append(ch.Errors,
errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos[0:filled], + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go new file mode 100644 index 00000000..85a17123 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go @@ -0,0 +1,151 @@ +package handlers + +import ( + "fmt" + "net/http" + "sync" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "golang.org/x/net/context" +) + +// Context should contain the request specific context for use across +// handlers. Resources that don't need to be shared across handlers should not +// be on this object. +type Context struct { + // App points to the application structure that created this context. + *App + context.Context + + // Repository is the repository for the current request. All requests + // should be scoped to a single repository. This field may be nil. + Repository distribution.Repository + + // Errors is a collection of errors encountered during the request to be + // returned to the client API. If errors are added to the collection, the + // handler *must not* start the response via http.ResponseWriter. + Errors errcode.Errors + + urlBuilder *v2.URLBuilder + + // TODO(stevvooe): The goal is to completely factor this context and + // dispatching out of the web application. Ideally, we should lean on + // context.Context for injection of these resources. +} + +// Value overrides context.Context.Value to ensure that calls are routed to +// the correct context.
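// NOTE (editorial sketch, not part of the vendored patch): Context embeds both
// *App and context.Context, so the explicit Value override below pins lookups to
// the embedded Context and keeps them unambiguous if App ever grows a Value
// method. A minimal illustration of the delegation pattern:
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

type app struct{} // stand-in for *App

type requestContext struct {
	*app
	context.Context
}

// Value routes lookups to the embedded Context, mirroring (*Context).Value below.
func (ctx *requestContext) Value(key interface{}) interface{} {
	return ctx.Context.Value(key)
}

func main() {
	base := context.WithValue(context.Background(), "vars.name", "library/ubuntu")
	ctx := &requestContext{app: &app{}, Context: base}
	fmt.Println(ctx.Value("vars.name")) // library/ubuntu
}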
+func (ctx *Context) Value(key interface{}) interface{} { + return ctx.Context.Value(key) +} + +func getName(ctx context.Context) (name string) { + return ctxu.GetStringValue(ctx, "vars.name") +} + +func getReference(ctx context.Context) (reference string) { + return ctxu.GetStringValue(ctx, "vars.reference") +} + +var errDigestNotAvailable = fmt.Errorf("digest not available in context") + +func getDigest(ctx context.Context) (dgst digest.Digest, err error) { + dgstStr := ctxu.GetStringValue(ctx, "vars.digest") + + if dgstStr == "" { + ctxu.GetLogger(ctx).Errorf("digest not available") + return "", errDigestNotAvailable + } + + d, err := digest.ParseDigest(dgstStr) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) + return "", err + } + + return d, nil +} + +func getUploadUUID(ctx context.Context) (uuid string) { + return ctxu.GetStringValue(ctx, "vars.uuid") +} + +// getUserName attempts to resolve a username from the context and request. If +// a username cannot be resolved, the empty string is returned. +func getUserName(ctx context.Context, r *http.Request) string { + username := ctxu.GetStringValue(ctx, "auth.user.name") + + // Fallback to request user with basic auth + if username == "" { + if uname, _, ok := basicAuth(r); ok { + username = uname + } + } + + return username +} + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. +// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. +// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. +var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// release frees any resources associated with the request.
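// NOTE (editorial sketch, not part of the vendored patch): because the manager
// keys contexts by *http.Request, every acquisition must be paired with a release
// or the map entry leaks. A simplified stand-in showing the lifecycle ServeHTTP
// follows above; locking is omitted for brevity, while the real manager guards
// the map with a mutex.
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
)

type miniManager struct {
	contexts map[*http.Request]context.Context
}

func (m *miniManager) context(r *http.Request) context.Context {
	if ctx, ok := m.contexts[r]; ok {
		return ctx // same request, same context
	}
	ctx := context.Background()
	m.contexts[r] = ctx
	return ctx
}

func (m *miniManager) release(r *http.Request) { delete(m.contexts, r) }

func main() {
	m := &miniManager{contexts: map[*http.Request]context.Context{}}
	r, _ := http.NewRequest("GET", "/v2/", nil)
	_ = m.context(r)
	defer m.release(r) // without this, the entry outlives the request
	fmt.Println(len(m.contexts)) // 1 while the request is in flight
}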
+func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go new file mode 100644 index 00000000..5a3c9984 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go @@ -0,0 +1,62 @@ +package handlers + +import ( + "errors" + "io" + "net/http" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// closeResources closes all the provided resources after running the target +// handler. +func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, closer := range closers { + defer closer.Close() + } + handler.ServeHTTP(w, r) + }) +} + +// copyFullPayload copies the payload of an HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := responseWriter.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) + } + + // Read in the data, if any. + copied, err := io.Copy(destWriter, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't receive as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + // Set the response code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status. + responseWriter.WriteHeader(499) + + ctxu.GetLogger(context).Error("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go new file mode 100644 index 00000000..1725d240 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go @@ -0,0 +1,72 @@ +package handlers + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +// blobUploadState captures the serializable state of the blob upload. +type blobUploadState struct { + // name is the primary repository under which the blob will be linked. + Name string + + // UUID identifies the upload.
+ UUID string + + // offset contains the current progress of the upload. + Offset int64 + + // StartedAt is the original start time of the upload. + StartedAt time.Time +} + +type hmacKey string + +// unpackUploadState unpacks and validates the blob upload state from the +// token, using the hmacKey secret. +func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { + var state blobUploadState + + tokenBytes, err := base64.URLEncoding.DecodeString(token) + if err != nil { + return state, err + } + mac := hmac.New(sha256.New, []byte(secret)) + + if len(tokenBytes) < mac.Size() { + return state, fmt.Errorf("Invalid token") + } + + macBytes := tokenBytes[:mac.Size()] + messageBytes := tokenBytes[mac.Size():] + + mac.Write(messageBytes) + if !hmac.Equal(mac.Sum(nil), macBytes) { + return state, fmt.Errorf("Invalid token") + } + + if err := json.Unmarshal(messageBytes, &state); err != nil { + return state, err + } + + return state, nil +} + +// packUploadState packs the upload state, signed with an HMAC digest using +// the hmacKey secret, encoding to url safe base64. The resulting token can be +// used to share data with minimized risk of external tampering. +func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { + mac := hmac.New(sha256.New, []byte(secret)) + p, err := json.Marshal(lus) + if err != nil { + return "", err + } + + mac.Write(p) + + return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go new file mode 100644 index 00000000..7bbab4f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go @@ -0,0 +1,53 @@ +package handlers + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" +) + +// logHook is for hooking panics in the web application +type logHook struct { + LevelsParam []string + Mail *mailer +} + +// Fire forwards an error to LogHook +func (hook *logHook) Fire(entry *logrus.Entry) error { + addr := strings.Split(hook.Mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) + + html := ` + {{.Message}} + + {{range $key, $value := .Data}} + {{$key}}: {{$value}} + {{end}} + ` + b := bytes.NewBuffer(make([]byte, 0)) + t := template.Must(template.New("mail body").Parse(html)) + if err := t.Execute(b, entry); err != nil { + return err + } + body := fmt.Sprintf("%s", b) + + return hook.Mail.sendMail(subject, body) +} + +// Levels contains hook levels to be caught +func (hook *logHook) Levels() []logrus.Level { + levels := []logrus.Level{} + for _, v := range hook.LevelsParam { + lv, _ := logrus.ParseLevel(v) + levels = append(levels, lv) + } + return levels +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go new file mode 100644 index 00000000..51156d3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go @@ -0,0 +1,338 @@ +package handlers + +import ( + "bytes" + "fmt" + "net/http" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +
"github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. +const ( + defaultArch = "amd64" + defaultOS = "linux" +) + +// imageManifestDispatcher takes the request context and builds the +// appropriate handler for handling image manifest requests. +func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { + imageManifestHandler := &imageManifestHandler{ + Context: ctx, + } + reference := getReference(ctx) + dgst, err := digest.ParseDigest(reference) + if err != nil { + // We just have a tag + imageManifestHandler.Tag = reference + } else { + imageManifestHandler.Digest = dgst + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), + } + + if !ctx.readOnly { + mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) + mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) + } + + return mhandler +} + +// imageManifestHandler handles http operations on image manifests. +type imageManifestHandler struct { + *Context + + // One of tag or digest gets set, depending on what is present in context. + Tag string + Digest digest.Digest +} + +// GetImageManifest fetches the image manifest from the storage backend, if it exists. +func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("GetImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + manifest, err = manifests.Get(imh, imh.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + supportsSchema2 := false + supportsManifestList := false + if acceptHeaders, ok := r.Header["Accept"]; ok { + for _, mediaType := range acceptHeaders { + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. 
+ if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } + } + + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) + w.Write(p) +} + +func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) + for _, d := range schema2Manifest.References() { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + return manifest, nil +} + +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted + return true + } + } + return false +} + +// PutImageManifest validates and stores an image in the registry. 
+func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("PutImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var jsonBuf bytes.Buffer + if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + mediaType := r.Header.Get("Content-Type") + manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + + if imh.Digest != "" { + if desc.Digest != imh.Digest { + ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + } + } else if imh.Tag != "" { + imh.Digest = desc.Digest + } else { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) + return + } + + _, err = manifests.Put(imh, manifest) + if err != nil { + // TODO(stevvooe): These error handling switches really need to be + // handled by an app global mapper. + if err == distribution.ErrUnsupported { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + } + switch err := err.(type) { + case distribution.ErrManifestVerification: + for _, verificationError := range err { + switch verificationError := verificationError.(type) { + case distribution.ErrManifestBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) + case distribution.ErrManifestNameInvalid: + imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case distribution.ErrManifestUnverified: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) + default: + if verificationError == digest.ErrDigestInvalidFormat { + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) + } + } + } + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + return + } + + // Tag this manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + err = tags.Tag(imh, imh.Tag, desc) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + } + + // Construct a canonical url for the uploaded manifest. + location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) + if err != nil { + // NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to + // happen. We'll log the error here but proceed as if it worked. Worst + // case, we set an empty location header. + ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + } + + w.Header().Set("Location", location) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// DeleteImageManifest removes the manifest with the given digest from the registry.
+func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("DeleteImageManifest") + + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + err = manifests.Delete(imh, imh.Digest) + if err != nil { + switch err { + case digest.ErrDigestUnsupported, digest.ErrDigestInvalidFormat: + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + case distribution.ErrBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + case distribution.ErrUnsupported: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) + return + } + } + + tagService := imh.Repository.Tags(imh) + referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + for _, tag := range referencedTags { + if err := tagService.Untag(imh, tag); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + } + + w.WriteHeader(http.StatusAccepted) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go new file mode 100644 index 00000000..39244909 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "errors" + "net/smtp" + "strings" +) + +// mailer provides fields of email configuration for sending. +type mailer struct { + Addr, Username, Password, From string + Insecure bool + To []string +} + +// sendMail allows users to send email, but only if the mail parameters are configured correctly. +func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("invalid mail address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + msg, + ) + if err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go new file mode 100644 index 00000000..d9f0106c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go @@ -0,0 +1,60 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// tagsDispatcher constructs the tags handler api endpoint. +func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GetTags returns a JSON list of tags for a specific image name.
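+// A sketch of the wire format, assuming a repository named "library/busybox"
+// with two tags:
+//
+//	GET /v2/library/busybox/tags/list
+//
+//	{"name":"library/busybox","tags":["1.24","latest"]}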
+func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + tagService := th.Repository.Tags(th) + tags, err := tagService.All(th) + if err != nil { + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) + default: + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Repository.Name(), + Tags: tags, + }); err != nil { + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go new file mode 100644 index 00000000..b93a7a63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go @@ -0,0 +1,74 @@ +package listener + +import ( + "fmt" + "net" + "os" + "time" +) + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. + // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go new file mode 100644 index 00000000..7535c6db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the constructor for different RegistryMiddleware backends. 
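+// A hypothetical backend would typically register itself from an init
+// function; the "noop" name and pass-through constructor below are
+// assumptions for illustration, not an existing middleware:
+//
+//	func init() {
+//		Register("noop", func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) {
+//			return registry, nil
+//		})
+//	}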
+type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go new file mode 100644 index 00000000..27b42aec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the constructor for different RepositoryMiddleware backends. +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. 
+func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go new file mode 100644 index 00000000..e4bec75a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -0,0 +1,54 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution/registry/client/auth" +) + +const tokenURL = "https://auth.docker.io/token" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +// ConfigureAuth authorizes with the upstream registry +func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { + if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { + return nil, err + } + + creds := map[string]userpass{ + tokenURL: { + username: username, + password: password, + }, + } + return credentials{creds: creds}, nil +} + +func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go new file mode 100644 index 00000000..41b76e8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -0,0 +1,186 @@ +package proxy + +import ( + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.BlobStore = &proxyBlobStore{} + +// inflight tracks currently downloading blobs +var inflight = make(map[digest.Digest]struct{}) + +// mu protects inflight +var mu sync.Mutex + +func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { + w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) + w.Header().Set("Content-Type", mediaType) + w.Header().Set("Docker-Content-Digest", digest.String()) + w.Header().Set("Etag", digest.String()) +} + +func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { + desc, err := pbs.remoteStore.Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + if w, ok := writer.(http.ResponseWriter); ok { + setResponseHeaders(w, 
desc.Size, desc.MediaType, dgst) + } + + remoteReader, err := pbs.remoteStore.Open(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + _, err = io.CopyN(writer, remoteReader, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + + proxyMetrics.BlobPush(uint64(desc.Size)) + + return desc, nil +} + +func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { + localDesc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil { + // Stat can report a zero sized file here if it's checked between creation + // and population. Return nil error, and continue + return false, nil + } + + if err == nil { + proxyMetrics.BlobPush(uint64(localDesc.Size)) + return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + return false, nil + +} + +func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { + defer func() { + mu.Lock() + delete(inflight, dgst) + mu.Unlock() + }() + + var desc distribution.Descriptor + var err error + var bw distribution.BlobWriter + + bw, err = pbs.localStore.Create(ctx) + if err != nil { + return err + } + + desc, err = pbs.copyContent(ctx, dgst, bw) + if err != nil { + return err + } + + _, err = bw.Commit(ctx, desc) + if err != nil { + return err + } + + return nil +} + +func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + served, err := pbs.serveLocal(ctx, w, r, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + return err + } + + if served { + return nil + } + + mu.Lock() + _, ok := inflight[dgst] + if ok { + mu.Unlock() + _, err := pbs.copyContent(ctx, dgst, w) + return err + } + inflight[dgst] = struct{}{} + mu.Unlock() + + go func(dgst digest.Digest) { + if err := pbs.storeLocal(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + } + pbs.scheduler.AddBlob(dgst.String(), repositoryTTL) + }(dgst) + + _, err = pbs.copyContent(ctx, dgst, w) + if err != nil { + return err + } + return nil +} + +func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := pbs.localStore.Stat(ctx, dgst) + if err == nil { + return desc, err + } + + if err != distribution.ErrBlobUnknown { + return distribution.Descriptor{}, err + } + + return pbs.remoteStore.Stat(ctx, dgst) +} + +// Unsupported functions +func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + return nil, distribution.ErrUnsupported +} + 
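+// The inflight map guarded by mu deduplicates concurrent cache fills: the
+// first request for a digest spawns storeLocal in a goroutine, and every
+// request, including the first, streams the blob straight from the remote.
+// A condensed sketch of the pattern used by ServeBlob above:
+//
+//	mu.Lock()
+//	if _, ok := inflight[dgst]; !ok {
+//		inflight[dgst] = struct{}{}
+//		go pbs.storeLocal(ctx, dgst) // exactly one writer populates the cache
+//	}
+//	mu.Unlock()
+//	_, err := pbs.copyContent(ctx, dgst, w) // all readers proxy from the remote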
+func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go new file mode 100644 index 00000000..13cb5f6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -0,0 +1,86 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName string + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + + return pms.remoteManifests.Exists(ctx, dgst) +} + +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) + if err != nil { + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
+ if err != nil { + return nil, err + } + fromRemote = true + } + + _, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) + + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } + + // Schedule the repo for removal + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + // Ensure the manifest blob is cleaned up + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + } + + return manifest, err +} + +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported +} + +func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +/*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + return 0, distribution.ErrUnsupported +} +*/ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go new file mode 100644 index 00000000..d3d84d78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go @@ -0,0 +1,74 @@ +package proxy + +import ( + "expvar" + "sync/atomic" +) + +// Metrics is used to hold metric counters +// related to the proxy +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 + BytesPulled uint64 + BytesPushed uint64 +} + +type proxyMetricsCollector struct { + blobMetrics Metrics + manifestMetrics Metrics +} + +// BlobPull tracks metrics about blobs pulled into the cache +func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.blobMetrics.Misses, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) +} + +// BlobPush tracks metrics about blobs pushed to clients +func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.blobMetrics.Requests, 1) + atomic.AddUint64(&pmc.blobMetrics.Hits, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) +} + +// ManifestPull tracks metrics related to Manifests pulled into the cache +func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) +} + +// ManifestPush tracks metrics about manifests pushed to clients +func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) + atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) +} + +// proxyMetrics tracks metrics about the proxy cache. This is +// kept globally and made available via expvar. 
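+// With the registry's debug HTTP server enabled these counters are exposed
+// through the standard expvar endpoint; the address below is an assumption
+// from a typical configuration:
+//
+//	curl -s http://localhost:5001/debug/vars | jq '.registry.proxy'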
+var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go new file mode 100644 index 00000000..8e1be5f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -0,0 +1,142 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + + scheduler *scheduler.TTLExpirationScheduler + + remoteURL string + credentialStore auth.CredentialStore + challengeManager auth.ChallengeManager +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + _, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(digest string) error { + return v.RemoveBlob(digest) + }) + s.OnManifestExpire(func(repoName string) error { + return v.RemoveRepository(repoName) + }) + + err = s.Start() + if err != nil { + return nil, err + } + + challengeManager := auth.NewSimpleChallengeManager() + cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) + if err != nil { + return nil, err + } + + return &proxyingRegistry{ + embedded: registry, + scheduler: s, + challengeManager: challengeManager, + credentialStore: cs, + remoteURL: config.RemoteURL, + }, nil +} + +func (pr *proxyingRegistry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + return pr.embedded.Repositories(ctx, repos, last) +} + +func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { + tr := transport.NewTransport(http.DefaultTransport, + auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) + + localRepo, err := pr.embedded.Repository(ctx, name) + if err != nil { + return nil, err + } + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) + if err 
!= nil { + return nil, err + } + + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) + if err != nil { + return nil, err + } + + remoteManifests, err := remoteRepo.Manifests(ctx) + if err != nil { + return nil, err + } + + return &proxiedRepository{ + blobStore: &proxyBlobStore{ + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + }, + manifests: proxyManifestStore{ + repositoryName: name, + localManifests: localManifests, // Options? + remoteManifests: remoteManifests, + ctx: ctx, + scheduler: pr.scheduler, + }, + name: name, + tags: proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + }, + }, nil +} + +// proxiedRepository uses proxying blob and manifest services to serve content +// locally, or pulling it through from a remote and caching it locally if it doesn't +// already exist +type proxiedRepository struct { + blobStore distribution.BlobStore + manifests distribution.ManifestService + name string + tags distribution.TagService +} + +func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return pr.manifests, nil +} + +func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { + return pr.blobStore +} + +func (pr *proxiedRepository) Name() string { + return pr.name +} + +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go new file mode 100644 index 00000000..c52460c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go @@ -0,0 +1,58 @@ +package proxy + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. 
If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + desc, err = pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go new file mode 100644 index 00000000..e91920a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go @@ -0,0 +1,252 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// onTTLExpiryFunc is called when a repository's TTL expires +type expiryFunc func(string) error + +const ( + entryTypeBlob = iota + entryTypeManifest + indexSaveFrequency = 5 * time.Second +) + +// schedulerEntry represents an entry in the scheduler +// fields are exported for serialization +type schedulerEntry struct { + Key string `json:"Key"` + Expiry time.Time `json:"ExpiryData"` + EntryType int `json:"EntryType"` + + timer *time.Timer +} + +// New returns a new instance of the scheduler +func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { + return &TTLExpirationScheduler{ + entries: make(map[string]*schedulerEntry), + driver: driver, + pathToStateFile: path, + ctx: ctx, + stopped: true, + doneChan: make(chan struct{}), + saveTimer: time.NewTicker(indexSaveFrequency), + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + sync.Mutex + + entries map[string]*schedulerEntry + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc + + indexDirty bool + saveTimer *time.Ticker + doneChan chan struct{} +} + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if 
ttles.stopped { + return fmt.Errorf("scheduler not started") + } + ttles.add(dgst, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(repoName, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler +func (ttles *TTLExpirationScheduler) Start() error { + ttles.Lock() + defer ttles.Unlock() + + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + + // Start timer for each deserialized entry + for _, entry := range ttles.entries { + entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + } + + // Start a ticker to periodically save the entries index + + go func() { + for { + select { + case <-ttles.saveTimer.C: + if !ttles.indexDirty { + continue + } + + ttles.Lock() + err := ttles.writeState() + if err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } else { + ttles.indexDirty = false + } + ttles.Unlock() + + case <-ttles.doneChan: + return + } + } + }() + + return nil +} + +func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { + entry := &schedulerEntry{ + Key: key, + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + if oldEntry, present := ttles.entries[key]; present && oldEntry.timer != nil { + oldEntry.timer.Stop() + } + ttles.entries[key] = entry + entry.timer = ttles.startTimer(entry, ttl) + ttles.indexDirty = true +} + +func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { + return time.AfterFunc(ttl, func() { + ttles.Lock() + defer ttles.Unlock() + + var f expiryFunc + + switch entry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(repoName string) error { + return fmt.Errorf("Unexpected scheduler entry type") + } + } + + if err := f(entry.Key); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + + delete(ttles.entries, entry.Key) + ttles.indexDirty = true + }) +} + +// Stop stops the scheduler. 
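+// A condensed sketch of the lifecycle, mirroring how the proxy registry
+// wires the scheduler up (the driver, vacuum v, and state path are
+// assumptions taken from NewRegistryPullThroughCache):
+//
+//	s := New(ctx, driver, "/scheduler-state.json")
+//	s.OnBlobExpire(func(dgst string) error { return v.RemoveBlob(dgst) })
+//	if err := s.Start(); err != nil {
+//		return err
+//	}
+//	_ = s.AddBlob(dgst.String(), 24*7*time.Hour)
+//	defer s.Stop()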
+func (ttles *TTLExpirationScheduler) Stop() { + ttles.Lock() + defer ttles.Unlock() + + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + + for _, entry := range ttles.entries { + entry.timer.Stop() + } + + close(ttles.doneChan) + ttles.saveTimer.Stop() + ttles.stopped = true +} + +func (ttles *TTLExpirationScheduler) writeState() error { + jsonBytes, err := json.Marshal(ttles.entries) + if err != nil { + return err + } + + err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) + if err != nil { + return err + } + + return nil +} + +func (ttles *TTLExpirationScheduler) readState() error { + if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil + default: + return err + } + } + + bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, &ttles.entries) + if err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go new file mode 100644 index 00000000..86cb6a17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go @@ -0,0 +1,337 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" +) + +// Cmd is a cobra command for running the registry. +var Cmd = &cobra.Command{ + Use: "registry <config>", + Short: "registry stores and distributes Docker images", + Long: "registry stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening on %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +var showVersion bool + +func init() { + Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} + +// NewRegistry creates a new registry from a context and configuration struct.
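+// A hedged sketch of embedding the registry in another program; the config
+// path is an assumption:
+//
+//	fp, err := os.Open("/etc/docker/registry/config.yml")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	config, err := configuration.Parse(fp)
+//	fp.Close()
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	registry, err := NewRegistry(context.Background(), config)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Fatalln(registry.ListenAndServe())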
+func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = context.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + + server := &http.Server{ + Handler: handler, + } + + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. +func (registry *Registry) ListenAndServe() error { + config := registry.config + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return err + } + + if config.HTTP.TLS.Certificate != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"http/1.1"}, + Certificates: make([]tls.Certificate, 1), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + } + + return registry.server.Serve(ln) +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = 
app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. +func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. + var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q", level, err, l) + } + + return l +} + +// panicHandler adds an HTTP handler to the web app. The handler recovers from +// a panic if one occurs. logrus.Panic transmits the panic message to +// pre-configured log hooks, which are defined in config.yml. +func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} + +// alive simply wraps the handler with a route that always returns an http 200 +// response when the path is matched. If the path is not matched, the request +// is passed to the provided handler. There is no guarantee of anything but +// that the server is up. Wrap with other handlers (such as health.Handler) +// for greater effect.
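+// For example, after handler = alive("/", handler), probing the root path
+// returns immediately (the address is an assumption):
+//
+//	curl -i http://localhost:5000/
+//	// HTTP/1.1 200 OK
+//	// Cache-Control: no-cache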
+func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go new file mode 100644 index 00000000..fad0a77a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go @@ -0,0 +1,60 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/registry/storage/cache" +) + +type blobStatCollector struct { + metrics cache.Metrics +} + +func (bsc *blobStatCollector) Hit() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Hits, 1) +} + +func (bsc *blobStatCollector) Miss() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Misses, 1) +} + +func (bsc *blobStatCollector) Metrics() cache.Metrics { + return bsc.metrics +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, it's recommended to instrument a particular cache +// implementation. +var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly.
+ return blobStatterCacheMetrics + })) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go new file mode 100644 index 00000000..2655e011 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go @@ -0,0 +1,78 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): This should be configurable in the future. +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +// blobServer simply serves blobs from a driver instance using a path function +// to identify paths and a descriptor service to fill in metadata. +type blobServer struct { + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) + redirect bool // allows disabling URLFor redirects +} + +func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + path, err := bs.pathFn(desc.Digest) + if err != nil { + return err + } + + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return err + + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. + return err + } + } + + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go new file mode 100644 index 00000000..f8fe23fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go @@ -0,0 +1,192 @@ +package storage + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// blobStore implements the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links.
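+// A rough usage sketch within this package; the driver value d and the
+// payload are assumptions:
+//
+//	bs := &blobStore{driver: d, statter: &blobStatter{driver: d}}
+//	desc, err := bs.Put(ctx, "application/octet-stream", payload)
+//	if err == nil {
+//		p, _ := bs.Get(ctx, desc.Digest) // round-trips the stored content
+//	}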
+type blobStore struct { + driver driver.StorageDriver + statter distribution.BlobStatter +} + +var _ distribution.BlobProvider = &blobStore{} + +// Get implements the BlobProvider.Get call. +func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + p, err := bs.driver.GetContent(ctx, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } + + return nil, err + } + + return p, err +} + +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. This is implemented as a convenience for other Put implementations. +func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + desc, err := bs.statter.Stat(ctx, dgst) + if err == nil { + // content already present + return desc, nil + } else if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) + // real error, return it + return distribution.Descriptor{}, err + } + + bp, err := bs.path(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype here, as well. + + return distribution.Descriptor{ + Size: int64(len(p)), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return "", err + } + + return bp, nil +} + +// link links the path to the provided digest by writing the digest into the +// target file. Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(ctx, path, []byte(dgst)) +} + +// readlink returns the linked digest at path. +func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(ctx, path) + if err != nil { + return "", err + } + + linked, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store path.
+func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { + dgst, err := bs.readlink(ctx, path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +type blobStatter struct { + driver driver.StorageDriver +} + +var _ distribution.BlobDescriptorService = &blobStatter{} + +// Stat implements BlobStatter.Stat by returning the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// a strong guarantee that the blob exists and is available. +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + path, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + } + + if fi.IsDir() { + // NOTE(stevvooe): This represents a corruption situation. Somehow, we + // calculated a blob path and then detected a directory. We log the + // error and then error on the side of not knowing about the blob. + context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + // TODO(stevvooe): Add method to resolve the mediatype. We can store and + // cache a "global" media type for the blob, even if a specific repo has a + // mediatype that overrides the main one. + + return distribution.Descriptor{ + Size: fi.Size(), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, nil +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go new file mode 100644 index 00000000..37903176 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go @@ -0,0 +1,380 @@ +package storage + +import ( + "errors" + "fmt" + "io" + "path" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var ( + errResumableDigestNotAvailable = errors.New("resumable digest not available") +) + +// blobWriter is used to control the various aspects of resumable +// blob upload. It implements the BlobWriter interface. +type blobWriter struct { + blobStore *linkedBlobStore + + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write + + // implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy + // the BlobWriter interface + bufferedFileWriter + + resumableDigestEnabled bool +} + +var _ distribution.BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload.
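+// A condensed sketch of the upload flow that drives this writer; the blob
+// service value blobs and the content reader are assumptions:
+//
+//	bw, _ := blobs.Create(ctx)  // returns a distribution.BlobWriter
+//	_, _ = io.Copy(bw, content) // bytes are hashed as they are written
+//	desc, _ := bw.Commit(ctx, distribution.Descriptor{Digest: expected})
+//	// desc is the canonical descriptor after validation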
+func (bw *blobWriter) ID() string { + return bw.id +} + +func (bw *blobWriter) StartedAt() time.Time { + return bw.startedAt +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + context.GetLogger(ctx).Debug("(*blobWriter).Commit") + + if err := bw.bufferedFileWriter.Close(); err != nil { + return distribution.Descriptor{}, err + } + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, canonical); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return distribution.Descriptor{}, err + } + + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + + return canonical, nil +} + +// Rollback the blob upload process, releasing any resources associated with +// the writer and canceling the operation. +func (bw *blobWriter) Cancel(ctx context.Context) error { + context.GetLogger(ctx).Debug("(*blobWriter).Rollback") + if err := bw.removeResources(ctx); err != nil { + return err + } + + bw.Close() + return nil +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) + bw.written += nn + + return nn, err +} + +func (bw *blobWriter) Close() error { + if bw.err != nil { + return bw.err + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil { + return err + } + + return bw.bufferedFileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. The canonical descriptor is returned. +func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if desc.Digest == "" { + // if no descriptors are provided, we have nothing to validate + // against. We don't really want to support this for the registry. 
+// validateBlob checks the data against the digest, returning an error if it
+// does not match. The canonical descriptor is returned.
+func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	var (
+		verified, fullHash bool
+		canonical          digest.Digest
+	)
+
+	if desc.Digest == "" {
+		// if no descriptors are provided, we have nothing to validate
+		// against. We don't really want to support this for the registry.
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Reason: fmt.Errorf("cannot validate against empty digest"),
+		}
+	}
+
+	// Stat the on disk file
+	if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): We really don't care if the file is
+			// not actually present for the reader. We now assume
+			// that the desc length is zero.
+			desc.Size = 0
+		default:
+			// Any other error we want propagated up the stack.
+			return distribution.Descriptor{}, err
+		}
+	} else {
+		if fi.IsDir() {
+			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
+		}
+
+		bw.size = fi.Size()
+	}
+
+	if desc.Size > 0 {
+		if desc.Size != bw.size {
+			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
+		}
+	} else {
+		// if provided 0 or negative length, we can assume caller doesn't know or
+		// care about length.
+		desc.Size = bw.size
+	}
+
+	// TODO(stevvooe): This section is very meandering. Need to be broken down
+	// to be a lot more clear.
+
+	if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
+		canonical = bw.digester.Digest()
+
+		if canonical.Algorithm() == desc.Digest.Algorithm() {
+			// Common case: client and server prefer the same canonical digest
+			// algorithm - currently SHA256.
+			verified = desc.Digest == canonical
+		} else {
+			// The client wants to use a different digest algorithm. They'll just
+			// have to be patient and wait for us to download and re-hash the
+			// uploaded content using that digest algorithm.
+			fullHash = true
+		}
+	} else if err == errResumableDigestNotAvailable {
+		// Not using resumable digests, so we need to hash the entire layer.
+		fullHash = true
+	} else {
+		return distribution.Descriptor{}, err
+	}
+
+	if fullHash {
+		// a fantastic optimization: if the written data and the size are
+		// the same, we don't need to read the data from the backend. This is
+		// because we've written the entire file in the lifecycle of the
+		// current instance.
+		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
+			canonical = bw.digester.Digest()
+			verified = desc.Digest == canonical
+		}
+
+		// If the check based on size fails, we fall back to the slowest of
+		// paths. We may be able to make the size-based check a stronger
+		// guarantee, so this may be defensive.
+		if !verified {
+			digester := digest.Canonical.New()
+
+			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			// Read the file from the backend driver and validate it.
+			fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+			defer fr.Close()
+
+			tr := io.TeeReader(fr, digester.Hash())
+
+			if _, err := io.Copy(digestVerifier, tr); err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			canonical = digester.Digest()
+			verified = digestVerifier.Verified()
+		}
+	}
+
+	if !verified {
+		context.GetLoggerWithFields(ctx,
+			map[interface{}]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided").
+ Errorf("canonical digest does match provided digest") + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Digest: desc.Digest, + Reason: fmt.Errorf("content does not match digest"), + } + } + + // update desc with canonical hash + desc.Digest = canonical + + if desc.MediaType == "" { + desc.MediaType = "application/octet-stream" + } + + return desc, nil +} + +// moveBlob moves the data into its final, hash-qualified destination, +// identified by dgst. The layer should be validated before commencing the +// move. +func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { + blobPath, err := pathFor(blobDataPathSpec{ + digest: desc.Digest, + }) + + if err != nil { + return err + } + + // Check for existence + if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // ensure that it doesn't exist. + default: + return err + } + } else { + // If the path exists, we can assume that the content has already + // been uploaded, since the blob storage is content-addressable. + // While it may be corrupted, detection of such corruption belongs + // elsewhere. + return nil + } + + // If no data was received, we may not actually have a file on disk. Check + // the size here and write a zero-length file to blobPath if this is the + // case. For the most part, this should only ever happen with zero-length + // tars. + if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // HACK(stevvooe): This is slightly dangerous: if we verify above, + // get a hash, then the underlying file is deleted, we risk moving + // a zero-length blob into a nonzero-length blob location. To + // prevent this horrid thing, we employ the hack of only allowing + // to this happen for the digest of an empty tar. + if desc.Digest == digest.DigestSha256EmptyTar { + return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) + } + + // We let this fail during the move below. + logrus. + WithField("upload.id", bw.ID()). + WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + // TODO(stevvooe): We should also write the mediatype when executing this move. + + return bw.blobStore.driver.Move(ctx, bw.path, blobPath) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. +func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := pathFor(uploadDataPathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. 
+ context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 00000000..39166876 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go new file mode 100644 index 00000000..d33f544d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -0,0 +1,178 @@ +// +build !noresumabledigest + +package storage + +import ( + "fmt" + "io" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigestAt attempts to restore the state of the internal hash function +// by loading the most recent saved hash state less than or equal to the given +// offset. Any unhashed bytes remaining less than the given offset are hashed +// from the content uploaded so far. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + if offset < 0 { + return fmt.Errorf("cannot resume hash at negative offset: %d", offset) + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset less than or equal to + // the requested offset. 
+ for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { + // This offset is closer to the requested offset. + hashStateMatch = hashState + } else if hashState.offset > offset { + // Remove any stored hash state with offsets higher than this one + // as writes to this resumed hasher will make those invalid. This + // is probably okay to skip for now since we don't expect anyone to + // use the API in this way. For that reason, we don't treat an + // an error here as a fatal error, but only log it. + if err := bw.driver.Delete(ctx, hashState.path); err != nil { + logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) + } + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(h.Len()); gapLen > 0 { + // Need to read content from the upload to catch up to the desired offset. + fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) + if err != nil { + return err + } + defer fr.Close() + + if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { + return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) + } + + if _, err := io.CopyN(h, fr, gapLen); err != nil { + return err + } + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. 
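The loop above picks the stored hash state with the largest offset not exceeding the requested one, then hashes the remaining gap from the upload file. The selection logic in isolation, as a sketch over the same `hashStateEntry` shape:

```go
package main

import "fmt"

type hashStateEntry struct {
	offset int64
	path   string
}

// bestState mirrors the selection above: choose the stored state with the
// largest offset that does not exceed the requested offset. A zero-valued
// result means "reset the hasher and hash from the beginning".
func bestState(states []hashStateEntry, offset int64) hashStateEntry {
	var best hashStateEntry
	for _, s := range states {
		if s.offset <= offset && s.offset > best.offset {
			best = s
		}
	}
	return best
}

func main() {
	states := []hashStateEntry{{1024, "state-1024"}, {4096, "state-4096"}, {8192, "state-8192"}}
	// Picks state-4096; the remaining 904-byte gap is then hashed from the file.
	fmt.Println(bestState(states, 5000))
}
```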
+ offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 00000000..10a39091 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go new file mode 100644 index 00000000..42390953 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go @@ -0,0 +1,179 @@ +package cachecheck + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" +) + +// CheckBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new +// implementations get the benefit. This should be used for unit tests. 
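`getStoredHashStates` parses the offset with `strconv.ParseInt(pathSuffix, 0, 64)`; base 0 follows Go literal rules, so a zero-padded suffix would be read as octal. The offsets written by `storeHashState` are presumably plain, unpadded decimal, which keeps this safe; a small demonstration of the difference:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0 infers the base from the prefix, like a Go integer literal.
	v, _ := strconv.ParseInt("4096", 0, 64)
	fmt.Println(v) // 4096

	// A leading zero means octal under base 0, which would silently
	// corrupt a zero-padded offset.
	v, _ = strconv.ParseInt("0755", 0, 64)
	fmt.Println(v) // 493, not 755
}
```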
+func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { + ctx := context.Background() + + checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) + checkBlobDescriptorCacheSetAndRead(t, ctx, provider) +} + +func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { + if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty store: %v", err) + } + + cache, err := provider.RepositoryScoped("") + if err == nil { + t.Fatalf("expected an error when asking for invalid repo") + } + + cache, err = provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting repository: %v", err) + } + + if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ + Digest: "sha384:abc", + Size: 10, + MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error with invalid digest: %v", err) + } + + if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ + Digest: "", + Size: 10, + MediaType: "application/octet-stream"}); err == nil { + t.Fatalf("expected error setting value on invalid descriptor") + } + + if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error checking for cache item with empty digest: %v", err) + } + + if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty repo: %v", err) + } +} + +func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // also check that we set the canonical key ("fake:abc") + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("descriptor not returned for canonical key: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // ensure that global gets extra descriptor mapping + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // get at it through canonical descriptor + desc, err = provider.Stat(ctx, expected.Digest) + if err != nil { + t.Fatalf("unexpected error checking glboal descriptor: %v", err) + } 
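Any `BlobDescriptorCacheProvider` implementation can be run through this conformance suite from an ordinary unit test. A sketch of the wiring for the in-memory provider defined later in this patch (the test name is illustrative):

```go
package memory

import (
	"testing"

	"github.com/docker/distribution/registry/storage/cache/cachecheck"
)

// TestInMemoryBlobDescriptorCacheProvider runs the shared cachecheck suite
// against the in-memory implementation.
func TestInMemoryBlobDescriptorCacheProvider(t *testing.T) {
	cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider())
}
```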
+ + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // now, we set the repo local mediatype to something else and ensure it + // doesn't get changed in the provider cache. + expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } +} + +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc") + expected := distribution.Descriptor{ + Digest: "sha256:abc", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error deleting descriptor") + } + + nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + err = cache.Clear(ctx, nonExistantDigest) + if err == nil { + t.Fatalf("expected error deleting unknown descriptor") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 00000000..94ca8a90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,101 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. 
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 00000000..68a68f08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,170 @@ +package memory + +import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. 
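The statter composes an authoritative backend with a cheaper cache, falling through and then re-priming the cache on a miss. A sketch of how a caller might assemble one with the in-memory provider from this patch (the `backend` value is assumed to come from the storage layer):

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
)

// newStatter wraps a backend descriptor service with the in-memory cache,
// mirroring how the registry wires NewCachedBlobStatter.
func newStatter(backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
	return cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), backend)
}
```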
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if rsimbdcp.repository == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + if rsimbdcp.repository == nil { + return distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if rsimbdcp.repository == nil { + // allocate map since we are setting it now. + rsimbdcp.parent.mu.Lock() + var ok bool + // have to read back value since we may have allocated elsewhere. + rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + rsimbdcp.repository = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository + } + + rsimbdcp.parent.mu.Unlock() + } + + if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
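`SetDescriptor` above allocates the per-repository map lazily and re-reads it under the parent lock, since another goroutine may have created the entry in the meantime. The same check-under-lock pattern in isolation, as a stdlib sketch:

```go
package example

import "sync"

type registryCaches struct {
	mu    sync.Mutex
	repos map[string]map[string]string
}

// scoped returns the per-repo map, allocating it on first use. Re-reading
// the map after taking the lock mirrors the re-check in SetDescriptor:
// another goroutine may have allocated the entry between lock attempts.
func (r *registryCaches) scoped(repo string) map[string]string {
	r.mu.Lock()
	defer r.mu.Unlock()

	m, ok := r.repos[repo]
	if !ok {
		m = make(map[string]string)
		r.repos[repo] = m
	}
	return m
}
```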
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go new file mode 100644 index 00000000..cb264b09 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -0,0 +1,268 @@ +package redis + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + "github.com/garyburd/redigo/redis" +) + +// redisBlobStatService provides an implementation of +// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in +// two parts. The first provide fast access to repository membership through a +// redis set for each repo. The second is a redis hash keyed by the digest of +// the layer, providing path, length and mediatype information. There is also +// a per-repository redis hash of the blob descriptor, allowing override of +// data. This is currently used to override the mediatype on a per-repository +// basis. +// +// Note that there is no implied relationship between these two caches. The +// layer may exist in one, both or none and the code must be written this way. +type redisBlobDescriptorService struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. A new connection if fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisBlobDescriptorCacheProvider returns a new redis-based +// BlobDescriptorCacheProvider using the provided redis connection pool. +func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { + return &redisBlobDescriptorService{ + pool: pool, + } +} + +// RepositoryScoped returns the scoped cache. 
+func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + return &repositoryScopedRedisBlobDescriptorService{ + repo: repo, + upstream: rbds, + }, nil +} + +// Stat retrieves the descriptor data from the redis hash entry. +func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.stat(ctx, conn, dgst) +} + +func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + // Not atomic in redis <= 2.3 + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + if err != nil { + return err + } + + if reply == 0 { + return distribution.ErrBlobUnknown + } + + return nil +} + +// stat provides an internal stat call that takes a connection parameter. This +// allows some internal management of the connection scope. +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. +func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. 
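`stat` above reads the three hash fields with `HMGET` and relies on `redis.Scan` tolerating a nil mediatype, while a nil digest or size is treated as a cache miss. A sketch of the same read, assuming a connection drawn from the redigo pool (`descriptorFields` is a hypothetical helper):

```go
package example

import (
	"github.com/garyburd/redigo/redis"
)

// descriptorFields reads the three hash fields the service stores. The
// caller decides what a missing field means; per the code above, a nil
// digest or size is an unknown blob, a nil mediatype is acceptable.
func descriptorFields(conn redis.Conn, key string) (dgst, mediatype string, size int64, err error) {
	reply, err := redis.Values(conn.Do("HMGET", key, "digest", "size", "mediatype"))
	if err != nil {
		return "", "", 0, err
	}
	_, err = redis.Scan(reply, &dgst, &size, &mediatype)
	return dgst, mediatype, size, err
}
```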
+ if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. + mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + if mediatype != "" { + upstream.MediaType = mediatype + } + + return upstream, nil +} + +// Clear removes the descriptor from the cache and forwards to the upstream descriptor store +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return err + } + + if !member { + return distribution.ErrBlobUnknown + } + + return rsrbds.upstream.Clear(ctx, dgst) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + if dgst != desc.Digest { + if dgst.Algorithm() == desc.Digest.Algorithm() { + return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest) + } + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + return rsrbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { + return err + } + + if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { + return err + } + + // Override repository mediatype. 
+ if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { + return err + } + + // Also set the values for the primary descriptor, if they differ by + // algorithm (ie sha256 vs sha512). + if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { + if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { + return err + } + } + + return nil +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { + return "repository::" + rsrbds.repo + "::blobs" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 00000000..481489f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 +1,66 @@ +package storage + +import ( + "errors" + "io" + "path" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// ErrFinishedWalk is used when the called walk function no longer wants +// to accept any more values. This is used for pagination when the +// required number of repos have been found. +var ErrFinishedWalk = errors.New("finished walk") + +// Returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { + var foundRepos []string + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } + + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return 0, err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + + // lop the base path off + repoPath := filePath[len(root)+1:] + + _, file := path.Split(repoPath) + if file == "_layers" { + repoPath = strings.TrimSuffix(repoPath, "/_layers") + if repoPath > last { + foundRepos = append(foundRepos, repoPath) + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return ErrFinishedWalk + } + + return nil + }) + + n = copy(repos, foundRepos) + + // Signal that we have no more entries by setting EOF + if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { + errVal = io.EOF + } + + return n, errVal +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go new file mode 100644 index 00000000..387d9234 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. 
+package storage diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go new file mode 100644 index 00000000..cbb95981 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -0,0 +1,366 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. +package azure + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" + paramRealm = "realm" +) + +type driver struct { + client azure.BlobStorageClient + container string +} + +type baseEmbed struct{ base.Base } + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. +type Driver struct{ baseEmbed } + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok || fmt.Sprint(accountName) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok || fmt.Sprint(accountKey) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseURL + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + return nil, err + } + + d := &driver{ + client: blobClient, + container: container} + return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil +} + +// Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. 
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { + return err + } + if err := d.client.CreateBlockBlob(d.container, path); err != nil { + return err + } + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) + return err +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + info, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + size := int64(info.ContentLength) + if offset >= size { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange) + if err != nil { + return nil, err + } + return resp, nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a location +// designated by the given path. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { + if blobExists, err := d.client.BlobExists(d.container, path); err != nil { + return 0, err + } else if !blobExists { + err := d.client.CreateBlockBlob(d.container, path) + if err != nil { + return 0, err + } + } + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + zw := newZeroFillWriter(&bw) + return zw.Write(d.container, path, offset, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
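`FromParameters` plus the content methods above are enough for a simple round trip. A sketch with placeholder credentials (the account, key and container values are illustrative only):

```go
package example

import (
	"github.com/docker/distribution/context"
	azure "github.com/docker/distribution/registry/storage/driver/azure"
)

// roundTrip writes a value and reads it back through the Azure driver.
func roundTrip() ([]byte, error) {
	d, err := azure.FromParameters(map[string]interface{}{
		"accountname": "myaccount", // placeholder
		"accountkey":  "bXlrZXk=",  // placeholder
		"container":   "registry",  // placeholder
	})
	if err != nil {
		return nil, err
	}

	ctx := context.Background()
	if err := d.PutContent(ctx, "/docker/registry/v2/hello", []byte("hi")); err != nil {
		return nil, err
	}
	return d.GetContent(ctx, "/docker/registry/v2/hello")
}
```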
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // Check if the path is a blob + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if ok { + blob, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(blob.ContentLength), + ModTime: mtim, + IsDir: false, + }}, nil + } + + // Check if path is a virtual container + virtContainerPath := path + if !strings.HasSuffix(virtContainerPath, "/") { + virtContainerPath += "/" + } + blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Prefix: virtContainerPath, + MaxResults: 1, + }) + if err != nil { + return nil, err + } + if len(blobs.Blobs) > 0 { + // path is a virtual container + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + }}, nil + } + + // path is not a blob or virtual container + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + ok, err := d.client.DeleteBlobIfExists(d.container, path) + if err != nil { + return err + } + if ok { + return nil // was a blob and deleted, return + } + + // Not a blob, see if path is a virtual container with blobs + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return err + } + + for _, b := range blobs { + if err = d.client.DeleteBlob(d.container, b); err != nil { + return err + } + } + + if len(blobs) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + return nil +} + +// URLFor returns a publicly accessible URL for the blob stored at given path +// for specified duration by making use of Azure Storage Shared Access Signatures (SAS). +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration + expires, ok := options["expiry"] + if ok { + t, ok := expires.(time.Time) + if ok { + expiresTime = t + } + } + return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") +} + +// directDescendants will find direct descendants (blobs or virtual containers) +// of from list of blob paths and will return their full paths. 
Elements in blobs +// list must be prefixed with a "/" and +// +// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is +// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} +func directDescendants(blobs []string, prefix string) []string { + if !strings.HasPrefix(prefix, "/") { // add trailing '/' + prefix = "/" + prefix + } + if !strings.HasSuffix(prefix, "/") { // containerify the path + prefix += "/" + } + + out := make(map[string]bool) + for _, b := range blobs { + if strings.HasPrefix(b, prefix) { + rel := b[len(prefix):] + c := strings.Count(rel, "/") + if c == 0 { + out[b] = true + } else { + out[prefix+rel[:strings.Index(rel, "/")]] = true + } + } + } + + var keys []string + for k := range out { + keys = append(keys, k) + } + return keys +} + +func (d *driver) listBlobs(container, virtPath string) ([]string, error) { + if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path + virtPath += "/" + } + + out := []string{} + marker := "" + for { + resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Marker: marker, + Prefix: virtPath, + }) + + if err != nil { + return out, err + } + + for _, b := range resp.Blobs { + out = append(out, b.Name) + } + + if len(resp.Blobs) == 0 || resp.NextMarker == "" { + break + } + marker = resp.NextMarker + } + return out, nil +} + +func is404(err error) bool { + e, ok := err.(azure.AzureStorageServiceError) + return ok && e.StatusCode == http.StatusNotFound +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go new file mode 100644 index 00000000..1c1df899 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go @@ -0,0 +1,24 @@ +package azure + +import ( + "fmt" + "io" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +// azureBlockStorage is adaptor between azure.BlobStorageClient and +// blockStorage interface. +type azureBlockStorage struct { + azure.BlobStorageClient +} + +func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { + return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) +} + +func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { + a := azureBlockStorage{} + a.BlobStorageClient = b + return a +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go new file mode 100644 index 00000000..776c7cd5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go @@ -0,0 +1,60 @@ +package azure + +import ( + "encoding/base64" + "fmt" + "math/rand" + "sync" + "time" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +type blockIDGenerator struct { + pool map[string]bool + r *rand.Rand + m sync.Mutex +} + +// Generate returns an unused random block id and adds the generated ID +// to list of used IDs so that the same block name is not used again. 
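The example in the comment above can be exercised directly; since `directDescendants` is unexported this only compiles inside the azure package, and the result order is unspecified because the set is built from a map:

```go
package azure

import "fmt"

// demoDirectDescendants reproduces the documented example from inside the
// package (order of the returned slice is not guaranteed).
func demoDirectDescendants() {
	blobs := []string{"/foo", "/bar/1", "/bar/2"}
	fmt.Println(directDescendants(blobs, "/"))   // [/foo /bar], in some order
	fmt.Println(directDescendants(blobs, "bar")) // [/bar/1 /bar/2], in some order
}
```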
+func (b *blockIDGenerator) Generate() string { + b.m.Lock() + defer b.m.Unlock() + + var id string + for { + id = toBlockID(int(b.r.Int())) + if !b.exists(id) { + break + } + } + b.pool[id] = true + return id +} + +func (b *blockIDGenerator) exists(id string) bool { + _, used := b.pool[id] + return used +} + +func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { + b.m.Lock() + defer b.m.Unlock() + + for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { + b.pool[bl.Name] = true + } +} + +func newBlockIDGenerator() *blockIDGenerator { + return &blockIDGenerator{ + pool: make(map[string]bool), + r: rand.New(rand.NewSource(time.Now().UnixNano()))} +} + +// toBlockId converts given integer to base64-encoded block ID of a fixed length. +func toBlockID(i int) string { + s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs + return base64.StdEncoding.EncodeToString([]byte(s)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go new file mode 100644 index 00000000..f18692d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go @@ -0,0 +1,208 @@ +package azure + +import ( + "fmt" + "io" + "io/ioutil" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +// blockStorage is the interface required from a block storage service +// client implementation +type blockStorage interface { + CreateBlockBlob(container, blob string) error + GetBlob(container, blob string) (io.ReadCloser, error) + GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) + PutBlock(container, blob, blockID string, chunk []byte) error + GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) + PutBlockList(container, blob string, blocks []azure.Block) error +} + +// randomBlobWriter enables random access semantics on Azure block blobs +// by enabling writing arbitrary length of chunks to arbitrary write offsets +// within the blob. Normally, Azure Blob Storage does not support random +// access semantics on block blobs; however, this writer can download, split and +// reupload the overlapping blocks and discards those being overwritten entirely. +type randomBlobWriter struct { + bs blockStorage + blockSize int +} + +func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { + return randomBlobWriter{bs: bs, blockSize: blockSize} +} + +// WriteBlobAt writes the given chunk to the specified position of an existing blob. +// The offset must be equals to size of the blob or smaller than it. 
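`toBlockID` zero-pads the integer before base64-encoding it because Azure requires every block ID within a blob to have the same length. A standalone sketch, duplicating the helper for illustration, showing the fixed 40-character result:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// toBlockID mirrors the helper above: pad to 29 digits, then base64-encode,
// so every generated ID has identical length.
func toBlockID(i int) string {
	s := fmt.Sprintf("%029d", i)
	return base64.StdEncoding.EncodeToString([]byte(s))
}

func main() {
	a, b := toBlockID(1), toBlockID(987654321)
	fmt.Println(len(a), len(b)) // 40 40: equal-length IDs regardless of value
}
```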
+func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { + rand := newBlockIDGenerator() + + blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) + if err != nil { + return 0, err + } + rand.Feed(blocks) // load existing block IDs + + // Check the write offset against the existing blob size + size := getBlobSize(blocks) + if offset < 0 || offset > size { + return 0, fmt.Errorf("wrong offset for Write: %v", offset) + } + + // Upload the new chunk as blocks + blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) + if err != nil { + return 0, err + } + + // For non-append operations, existing blocks may need to be split + if offset != size { + // Split the block on the left end (if any) + leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) + if err != nil { + return 0, err + } + blockList = append(leftBlocks, blockList...) + + // Split the block on the right end (if any) + rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) + if err != nil { + return 0, err + } + blockList = append(blockList, rightBlocks...) + } else { + // Use existing block list + var existingBlocks []azure.Block + for _, v := range blocks.CommittedBlocks { + existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) + } + blockList = append(existingBlocks, blockList...) + } + // Put block list + return nn, r.bs.PutBlockList(container, blob, blockList) +} + +func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { + blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) + if err != nil { + return 0, err + } + return getBlobSize(blocks), nil +} + +// writeChunkToBlocks writes the given chunk to one or more blocks within the specified +// blob and returns their block representations. Those blocks are not yet committed. +func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { + var newBlocks []azure.Block + var nn int64 + + // Read chunks of at most size N except the last chunk to + // maximize block size and minimize block count. + buf := make([]byte, r.blockSize) + for { + n, err := io.ReadFull(chunk, buf) + if err == io.EOF { + break + } + nn += int64(n) + data := buf[:n] + blockID := rand.Generate() + if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { + return newBlocks, nn, err + } + newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) + } + return newBlocks, nn, nil +} + +// blocksLeftSide returns the blocks that are going to be at the left side of +// the writeOffset: [0, writeOffset) by identifying blocks that will remain +// the same and splitting and re-uploading blocks as needed.
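+// +// Worked example (illustrative, editor's addition): with committed blocks of +// sizes [4, 4, 4] and writeOffset=6, the first block is reused as-is and bytes +// [4,6) of the second block are re-uploaded as a new two-byte block; everything +// at or past the offset is handled by blocksRightSide.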
+func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { + var left []azure.Block + bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) + if err != nil { + return left, err + } + + o := writeOffset + elapsed := int64(0) + for _, v := range bx.CommittedBlocks { + blkSize := int64(v.Size) + if o >= blkSize { // use existing block + left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) + o -= blkSize + elapsed += blkSize + } else if o > 0 { // current block needs to be split + start := elapsed + size := o + part, err := r.bs.GetSectionReader(container, blob, start, size) + if err != nil { + return left, err + } + newBlockID := rand.Generate() + + data, err := ioutil.ReadAll(part) + if err != nil { + return left, err + } + if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { + return left, err + } + left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) + break + } + } + return left, nil +} + +// blocksRightSide returns the blocks that are going to be at the right side of +// the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain +// the same and splitting and re-uploading blocks as needed. +func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { + var right []azure.Block + + bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) + if err != nil { + return nil, err + } + + re := writeOffset + chunkSize - 1 // right end of written chunk + var elapsed int64 + for _, v := range bx.CommittedBlocks { + var ( + bs = elapsed // left end of current block + be = elapsed + int64(v.Size) - 1 // right end of current block + ) + + if bs > re { // take the block as is + right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) + } else if be > re { // current block needs to be split + part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) + if err != nil { + return right, err + } + newBlockID := rand.Generate() + + data, err := ioutil.ReadAll(part) + if err != nil { + return right, err + } + if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { + return right, err + } + right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) + } + elapsed += int64(v.Size) + } + return right, nil +} + +func getBlobSize(blocks azure.BlockListResponse) int64 { + var n int64 + for _, v := range blocks.CommittedBlocks { + n += int64(v.Size) + } + return n +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go new file mode 100644 index 00000000..095489d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go @@ -0,0 +1,49 @@ +package azure + +import ( + "bytes" + "io" +) + +type blockBlobWriter interface { + GetSize(container, blob string) (int64, error) + WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) +} + +// zeroFillWriter enables writing to an offset outside a block blob's size +// by offering the chunk to the underlying writer as contiguous data, with +// the gap in between filled with NUL (zero) bytes.
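+// +// For example (illustrative, editor's addition): writing a 3-byte chunk at +// offset 10 into a 6-byte blob passes 4 zero bytes followed by the chunk to +// the underlying writer at offset 6, and reports 3 bytes written (the zeros +// are not counted).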
+type zeroFillWriter struct { + blockBlobWriter +} + +func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { + w := zeroFillWriter{} + w.blockBlobWriter = b + return w +} + +// Write writes the given chunk to the specified existing blob even if the +// offset is beyond the blob's size. The gap is filled with zeros. The +// returned byte count does not include the zeros written. +func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { + size, err := z.blockBlobWriter.GetSize(container, blob) + if err != nil { + return 0, err + } + + var reader io.Reader + var zeroPadding int64 + if offset <= size { + reader = chunk + } else { + zeroPadding = offset - size + offset = size // adjust offset to be the append index + zeros := bytes.NewReader(make([]byte, zeroPadding)) + reader = io.MultiReader(zeros, chunk) + } + + nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) + nn -= zeroPadding + return nn, err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go new file mode 100644 index 00000000..c816d2d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -0,0 +1,202 @@ +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to using this package is to embed it in the exported +// driver struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before they reach Base, +// while driver implements the actual logic. +// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. +package base + +import ( + "io" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Base provides a wrapper around a storagedriver implementation that adds +// common path and bounds checking.
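+// +// A driver may also use Base directly when no additional shielding is needed; +// a minimal sketch (editor's illustration, field values are placeholders): +// +//	func New(...) storagedriver.StorageDriver { +//		return &base.Base{StorageDriver: &driver{...}} +//	}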
+type Base struct { + storagedriver.StorageDriver +} + +// Format errors received from the storage driver +func (base *Base) setDriverName(e error) error { + switch actual := e.(type) { + case nil: + return nil + case storagedriver.ErrUnsupportedMethod: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.PathNotFoundError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidPathError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidOffsetError: + actual.DriverName = base.StorageDriver.Name() + return actual + default: + storageError := storagedriver.Error{ + DriverName: base.StorageDriver.Name(), + Enclosed: e, + } + + return storageError + } +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + b, e := base.StorageDriver.GetContent(ctx, path) + return b, base.setDriverName(e) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) +} + +// ReadStream wraps ReadStream of underlying storage driver. +func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + rc, e := base.StorageDriver.ReadStream(ctx, path, offset) + return rc, base.setDriverName(e) +} + +// WriteStream wraps WriteStream of underlying storage driver. +func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) + return i64, base.setDriverName(e) +} + +// Stat wraps Stat of underlying storage driver. 
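+// +// As with the other wrappers, the path is validated first; for example +// (illustrative, editor's addition), base.Stat(ctx, "no-leading-slash") +// returns an InvalidPathError without ever invoking the wrapped driver.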
+func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Stat(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + fi, e := base.StorageDriver.Stat(ctx, path) + return fi, base.setDriverName(e) +} + +// List wraps List of underlying storage driver. +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.List(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.List(ctx, path) + return str, base.setDriverName(e) +} + +// Move wraps Move of underlying storage driver. +func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath) + + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) +} + +// Delete wraps Delete of underlying storage driver. +func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Delete(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Delete(ctx, path)) +} + +// URLFor wraps URLFor of underlying storage driver.
+func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.URLFor(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.URLFor(ctx, path, options) + return str, base.setDriverName(e) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go new file mode 100644 index 00000000..e84f0026 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go @@ -0,0 +1,55 @@ +package factory + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. If no factory is registered under the +// given name, an InvalidStorageDriverError is returned. +func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go new file mode 100644 index 00000000..e5064029 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go @@ -0,0 +1,79 @@ +package driver + +import "time" + +// FileInfo describes a given path. Inspired by os.FileInfo, +// it elides the base name method in favor of the full path. +type FileInfo interface { + // Path provides the full path of the target of this file info.
+ Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should be moved +// to a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// FileInfoInternal. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var _ FileInfo = FileInfoInternal{} +var _ FileInfo = &FileInfoInternal{} + +// Path provides the full path of the target of this file info. +func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory.
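+// +// (Editor's illustrative sketch: a driver without a specialized FileInfo type +// can return +// +//	storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ +//		Path: "/a/b", Size: 42, ModTime: time.Now(), +//	}} +// +// from its Stat implementation, as the gcs and inmemory drivers below do.)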
+func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go new file mode 100644 index 00000000..5b495818 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -0,0 +1,288 @@ +package filesystem + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "filesystem" +const defaultRootDirectory = "/var/lib/registry" + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters), nil +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +func FromParameters(parameters map[string]interface{}) *Driver { + var rootDirectory = defaultRootDirectory + if parameters != nil { + rootDir, ok := parameters["rootdirectory"] + if ok { + rootDirectory = fmt.Sprint(rootDir) + } + } + return New(rootDirectory) +} + +// New constructs a new Driver with a given rootDirectory +func New(rootDirectory string) *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + rootDirectory: rootDirectory, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return os.Truncate(d.fullPath(subPath), int64(len(contents))) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
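+// +// (Illustrative, editor's addition: constructing this driver from configuration +// might look like +// +//	d := filesystem.FromParameters(map[string]interface{}{ +//		"rootdirectory": "/tmp/registry-test", +//	}) +// +// where "/tmp/registry-test" is a hypothetical root; the driver defaults to +// /var/lib/registry when the parameter is omitted.)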
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(offset, os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < offset { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +// WriteStream stores the contents of the provided io.Reader at a location +// designated by the given path. +func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { + // TODO(stevvooe): This needs to be a requirement. + // if !path.IsAbs(subPath) { + // return fmt.Errorf("absolute path required: %q", subPath) + // } + + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0777); err != nil { + return 0, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + // TODO(stevvooe): A few missing conditions in storage driver: + // 1. What if the path is already a directory? + // 2. Should number 1 be exposed explicitly in storagedriver? + // 3. Can this path not exist, even if we create above? + return 0, err + } + defer fp.Close() + + nn, err = fp.Seek(offset, os.SEEK_SET) + if err != nil { + return 0, err + } + + if nn != offset { + return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) + } + + return io.Copy(fp, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an ErrUnsupportedMethod in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go new file mode 100644 index 00000000..0f23ea78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go @@ -0,0 +1,3 @@ +// Package gcs implements the Google Cloud Storage driver backend. Support can be +// enabled by including the "include_gcs" build tag. +package gcs diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go new file mode 100644 index 00000000..765d5492 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go @@ -0,0 +1,625 @@ +// Package gcs provides a storagedriver.StorageDriver implementation to +// store blobs in Google cloud storage. +// +// This package leverages the google.golang.org/cloud/storage client library +// for interfacing with gcs. +// +// Because gcs is a key-value store, the Stat call does not support last-modification +// time for directories (directories are an abstraction in key-value stores). +// +// Keep in mind that gcs guarantees only eventual consistency, so do not assume +// that a successful write will mean immediate access to the data written (although +// in most regions a new object put has guaranteed read after write). The only true +// guarantee is that once you call Stat and receive a certain file size, that much of +// the file is already accessible.
+// +// +build include_gcs + +package gcs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/api/googleapi" + storageapi "google.golang.org/api/storage/v1" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" + + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "gcs" +const dummyProjectID = "" + +// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set +type driverParameters struct { + bucket string + config *jwt.Config + email string + privateKey []byte + client *http.Client + rootDirectory string +} + +func init() { + factory.Register(driverName, &gcsDriverFactory{}) +} + +// gcsDriverFactory implements the factory.StorageDriverFactory interface +type gcsDriverFactory struct{} + +// Create StorageDriver from parameters +func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// driver is a storagedriver.StorageDriver implementation backed by GCS +// Objects are stored at absolute keys in the provided bucket. +type driver struct { + client *http.Client + bucket string + email string + privateKey []byte + rootDirectory string +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - bucket +func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + var ts oauth2.TokenSource + jwtConf := new(jwt.Config) + if keyfile, ok := parameters["keyfile"]; ok { + jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) + if err != nil { + return nil, err + } + jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) + } else { + var err error + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + + } + + params := driverParameters{ + bucket: fmt.Sprint(bucket), + rootDirectory: fmt.Sprint(rootDirectory), + email: jwtConf.Email, + privateKey: jwtConf.PrivateKey, + client: oauth2.NewClient(context.Background(), ts), + } + + return New(params) +} + +// New constructs a new driver +func New(params driverParameters) (storagedriver.StorageDriver, error) { + rootDirectory := strings.Trim(params.rootDirectory, "/") + if rootDirectory != "" { + rootDirectory += "/" + } + d := &driver{ + bucket: params.bucket, + rootDirectory: rootDirectory, + email: params.email, + privateKey: params.privateKey, + client: params.client, + } + + return &base.Base{ + StorageDriver: d, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. 
+func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(context, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + defer wc.Close() + _, err := wc.Write(contents) + return err +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + name := d.pathToKey(path) + + // copied from google.golang.org/cloud/storage#NewReader: + // to set the additional "Range" header + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", d.bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) + } + res, err := d.client.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storage.StatObject(d.context(context), d.bucket, name) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + if res.StatusCode < 200 || res.StatusCode > 299 { + res.Body.Close() + return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) + } + return res.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path.
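+// +// Rough outline of the append path below (editor's summary, not a complete +// transcription): the existing object's metadata is fetched; if it already has +// 1023 or more components the blob is rewritten from scratch; otherwise the +// chunk is uploaded as a temporary "<key>#part-N#" object (zero-padded first +// when offset > size) and composed with the existing object, after which the +// temporary part is deleted.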
+func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if offset == 0 { + return d.writeCompletely(context, path, 0, reader) + } + + service, err := storageapi.New(d.client) + if err != nil { + return 0, err + } + objService := storageapi.NewObjectsService(service) + var obj *storageapi.Object + err = retry(5, func() error { + o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() + obj = o + return err + }) + // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) + if err != nil { + return 0, err + } + + // cannot append more chunks, so redo from scratch + if obj.ComponentCount >= 1023 { + return d.writeCompletely(context, path, offset, reader) + } + + // skip from reader + objSize := int64(obj.Size) + nn, err := skip(reader, objSize-offset) + if err != nil { + return nn, err + } + + // From here on we append at the end of the existing object, zero-padding first if offset > Size + partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) + gcsContext := d.context(context) + wc := storage.NewWriter(gcsContext, d.bucket, partName) + wc.ContentType = "application/octet-stream" + + if objSize < offset { + err = writeZeros(wc, offset-objSize) + if err != nil { + wc.CloseWithError(err) + return nn, err + } + } + n, err := io.Copy(wc, reader) + if err != nil { + wc.CloseWithError(err) + return nn, err + } + err = wc.Close() + if err != nil { + return nn, err + } + // wc was closed successfully, so the temporary part exists, schedule it for deletion at the end + // of the function + defer storage.DeleteObject(gcsContext, d.bucket, partName) + + req := &storageapi.ComposeRequest{ + Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, + SourceObjects: []*storageapi.ComposeRequestSourceObjects{ + { + Name: obj.Name, + Generation: obj.Generation, + }, { + Name: partName, + Generation: wc.Object().Generation, + }}, + } + + err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) + if err == nil { + nn = nn + n + } + + return nn, err +} + +type request func() error + +func retry(maxTries int, req request) error { + backoff := time.Second + var err error + for i := 0; i < maxTries; i++ { + err = req() + if err == nil { + return nil + } + + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + return err + } + + time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) + if i <= 4 { + backoff = backoff * 2 + } + } + return err +} + +func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + defer wc.Close() + + // Copy the first offset bytes of the existing contents + // (padded with zeros if needed) into the writer + if offset > 0 { + existing, err := d.ReadStream(context, path, 0) + if err != nil { + return 0, err + } + defer existing.Close() + n, err := io.CopyN(wc, existing, offset) + if err == io.EOF { + err = writeZeros(wc, offset-n) + } + if err != nil { + return 0, err + } + } + return io.Copy(wc, reader) +} + +func skip(reader io.Reader, count int64) (int64, error) { + if count <= 0 { + return 0, nil + } + return io.CopyN(ioutil.Discard, reader, count) +} + +func writeZeros(wc
io.Writer, count int64) error { + buf := make([]byte, 32*1024) + for count > 0 { + size := cap(buf) + if int64(size) > count { + size = int(count) + } + n, err := wc.Write(buf[0:size]) + if err != nil { + return err + } + count = count - int64(n) + } + return nil +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { + var fi storagedriver.FileInfoFields + // try to get as a file + gcsContext := d.context(context) + obj, err := storage.StatObject(gcsContext, d.bucket, d.pathToKey(path)) + if err == nil { + fi = storagedriver.FileInfoFields{ + Path: path, + Size: obj.Size, + ModTime: obj.Updated, + IsDir: false, + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + // try to get as a folder + dirpath := d.pathToDirKey(path) + + query := &storage.Query{} + query.Prefix = dirpath + query.MaxResults = 1 + + objects, err := storage.ListObjects(gcsContext, d.bucket, query) + if err != nil { + return nil, err + } + if len(objects.Results) < 1 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + } + obj = objects.Results[0] + if obj.Name == dirpath { + fi.Size = obj.Size + fi.ModTime = obj.Updated + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the +// given path. +func (d *driver) List(context ctx.Context, path string) ([]string, error) { + query := &storage.Query{} + query.Delimiter = "/" + query.Prefix = d.pathToDirKey(path) + list := make([]string, 0, 64) + for { + objects, err := storage.ListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, object := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted. + if object.Deleted.IsZero() { + name := object.Name + // Ignore objects with names that end with '#' (these are uploaded parts) + if name[len(name)-1] != '#' { + name = d.keyToPath(name) + list = append(list, name) + } + } + } + for _, subpath := range objects.Prefixes { + subpath = d.keyToPath(subpath) + list = append(list, subpath) + } + query = objects.Next + if query == nil { + break + } + } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object.
+func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { + prefix := d.pathToDirKey(sourcePath) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + destPrefix := d.pathToDirKey(destPath) + copies := make([]string, 0, len(keys)) + sort.Strings(keys) + var err error + for _, key := range keys { + dest := destPrefix + key[len(prefix):] + _, err = storage.CopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) + if err == nil { + copies = append(copies, dest) + } else { + break + } + } + // if an error occurred, attempt to cleanup the copies made + if err != nil { + for i := len(copies) - 1; i >= 0; i-- { + _ = storage.DeleteObject(gcsContext, d.bucket, copies[i]) + } + return err + } + // delete originals + for i := len(keys) - 1; i >= 0; i-- { + err2 := storage.DeleteObject(gcsContext, d.bucket, keys[i]) + if err2 != nil { + err = err2 + } + } + return err + } + _, err = storage.CopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + } + return err + } + return storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) +} + +// listAll recursively lists all names of objects stored at "prefix" and its subpaths. +func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { + list := make([]string, 0, 64) + query := &storage.Query{} + query.Prefix = prefix + query.Versions = false + for { + objects, err := storage.ListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, obj := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted. + if obj.Deleted.IsZero() { + list = append(list, obj.Name) + } + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(context ctx.Context, path string) error { + prefix := d.pathToDirKey(path) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + for _, key := range keys { + if err := storage.DeleteObject(gcsContext, d.bucket, key); err != nil { + return err + } + } + return nil + } + err = storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + } + } + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options.
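+// For example (illustrative, editor's addition), a signed GET URL valid for ten minutes: +// +//	url, err := d.URLFor(ctx, "/a/b", map[string]interface{}{ +//		"method": "GET", +//		"expiry": time.Now().Add(10 * time.Minute), +//	}) +//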
+// Returns ErrUnsupportedMethod if this driver has no privateKey +func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { + if d.privateKey == nil { + return "", storagedriver.ErrUnsupportedMethod{} + } + + name := d.pathToKey(path) + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + opts := &storage.SignedURLOptions{ + GoogleAccessID: d.email, + PrivateKey: d.privateKey, + Method: methodString, + Expires: expiresTime, + } + return storage.SignedURL(d.bucket, name, opts) +} + +func (d *driver) context(context ctx.Context) context.Context { + return cloud.WithContext(context, dummyProjectID, d.client) +} + +func (d *driver) pathToKey(path string) string { + return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") +} + +func (d *driver) pathToDirKey(path string) string { + return d.pathToKey(path) + "/" +} + +func (d *driver) keyToPath(key string) string { + return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go new file mode 100644 index 00000000..b5735c0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -0,0 +1,262 @@ +package inmemory + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "inmemory" + +func init() { + factory.Register(driverName, &inMemoryDriverFactory{}) +} + +// inMemoryDriverFactory implements the factory.StorageDriverFactory interface. +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +type driver struct { + root *dir + mutex sync.RWMutex +} + +// baseEmbed allows us to hide the Base embed. +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local map. +// Intended solely for example and testing purposes. +type Driver struct { + baseEmbed // embedded, hidden base driver. +} + +var _ storagedriver.StorageDriver = &Driver{} + +// New constructs a new Driver. +func New() *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface. + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte.
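+// +// (Illustrative, editor's addition: the in-memory driver is convenient in tests, e.g. +// +//	d := inmemory.New() +//	_ = d.PutContent(ctx, "/greeting", []byte("hello")) +//	b, _ := d.GetContent(ctx, "/greeting") // b == []byte("hello") +// +// where ctx is any context.Context.)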
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + return ioutil.ReadAll(rc) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + f, err := d.root.mkfile(p) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + path = normalize(path) + found := d.root.find(path) + + if found.path() != path { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// WriteStream stores the contents of the provided io.Reader at a location +// designated by the given path. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return 0, fmt.Errorf("not a file") + } + + // Unlock while we are reading from the source, in case we are reading + // from the same mfs instance. This can be fixed by a more granular + // locking model. + d.mutex.Unlock() + d.mutex.RLock() // Take the readlock to block other writers. + var buf bytes.Buffer + + nn, err = buf.ReadFrom(reader) + if err != nil { + // TODO(stevvooe): This condition is odd and we may need to clarify: + // we've read nn bytes from reader but have written nothing to the + // backend. What is the correct return value? Really, the caller needs + // to know that the reader has been advanced and reattempting the + // operation is incorrect. + d.mutex.RUnlock() + d.mutex.Lock() + return nn, err + } + + d.mutex.RUnlock() + d.mutex.Lock() + f.WriteAt(buf.Bytes(), offset) + return nn, err +} + +// Stat returns info about the provided path. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(path) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path.
+func (d *driver) List(ctx context.Context, path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: sourcePath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an ErrUnsupportedMethod in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go new file mode 100644 index 00000000..cdefacfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go @@ -0,0 +1,338 @@ +package inmemory + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +var ( + errExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") + errIsNotDir = fmt.Errorf("notdir") + errIsDir = fmt.Errorf("isdir") +) + +type node interface { + name() string + path() string + isdir() bool + modtime() time.Time +} + +// dir is the central type for the memory-based storagedriver. All operations +// are dispatched from a root dir. +type dir struct { + common + + // TODO(stevvooe): Use sorted slice + search. + children map[string]node +} + +var _ node = &dir{} + +func (d *dir) isdir() bool { + return true +} + +// add places the node n into dir d. +func (d *dir) add(n node) { + if d.children == nil { + d.children = make(map[string]node) + } + + d.children[n.name()] = n + d.mod = time.Now() +} + +// find searches for the node with path q in dir. If the node is found, it is +// returned and the returned (node).path() will match q. Otherwise, the +// closest existing parent is returned.
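+// +// For example (illustrative, editor's addition): in a tree containing only +// "/a/b", find("/a/b") returns the file node, while find("/a/x/y") returns +// the "/a" directory (the closest existing parent), whose path no longer +// matches the query.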
+func (d *dir) find(q string) node { + q = strings.Trim(q, "/") + i := strings.Index(q, "/") + + if q == "" { + return d + } + + if i == 0 { + panic("shouldn't happen, no root paths") + } + + var component string + if i < 0 { + // No more path components + component = q + } else { + component = q[:i] + } + + child, ok := d.children[component] + if !ok { + // Node was not found. Return the current node, the closest existing parent. + return d + } + + if child.isdir() { + // traverse down! + q = q[i+1:] + return child.(*dir).find(q) + } + + return child +} + +func (d *dir) list(p string) ([]string, error) { + n := d.find(p) + + if n.path() != p { + return nil, errNotExists + } + + if !n.isdir() { + return nil, errIsNotDir + } + + var children []string + for _, child := range n.(*dir).children { + children = append(children, child.path()) + } + + sort.Strings(children) + return children, nil +} + +// mkfile creates the file at p or returns the existing one. It returns an +// error if a node exists at p and is a directory. Essentially, this is open-or-create. +func (d *dir) mkfile(p string) (*file, error) { + n := d.find(p) + if n.path() == p { + if n.isdir() { + return nil, errIsDir + } + + return n.(*file), nil + } + + dirpath, filename := path.Split(p) + // Make any non-existent directories + n, err := d.mkdirs(dirpath) + if err != nil { + return nil, err + } + + dd := n.(*dir) + n = &file{ + common: common{ + p: path.Join(dd.path(), filename), + mod: time.Now(), + }, + } + + dd.add(n) + return n.(*file), nil +} + +// mkdirs creates any missing directory entries in p and returns the result. +func (d *dir) mkdirs(p string) (*dir, error) { + p = normalize(p) + + n := d.find(p) + + if !n.isdir() { + // Found something there + return nil, errIsNotDir + } + + if n.path() == p { + return n.(*dir), nil + } + + dd := n.(*dir) + + relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") + + if relative == "" { + return dd, nil + } + + components := strings.Split(relative, "/") + for _, component := range components { + d, err := dd.mkdir(component) + + if err != nil { + // This should actually never happen, since there are no children. + return nil, err + } + dd = d + } + + return dd, nil +} + +// mkdir creates a child directory under d with the given name. +func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if normalize(srcDirname) != normalize(sp.path()) { + return errNotExists + } + + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid.
+ }
+
+ s, ok := spd.children[srcFilename]
+ if !ok {
+ return errNotExists
+ }
+
+ delete(spd.children, srcFilename)
+
+ switch n := s.(type) {
+ case *dir:
+ n.p = dst
+ case *file:
+ n.p = dst
+ }
+
+ dp.add(s)
+
+ return nil
+}
+
+func (d *dir) delete(p string) error {
+ dirname, filename := path.Split(p)
+ parent := d.find(dirname)
+
+ if normalize(dirname) != normalize(parent.path()) {
+ return errNotExists
+ }
+
+ if _, ok := parent.(*dir).children[filename]; !ok {
+ return errNotExists
+ }
+
+ delete(parent.(*dir).children, filename)
+ return nil
+}
+
+// dump outputs a primitive directory structure to stdout.
+func (d *dir) dump(indent string) {
+ fmt.Println(indent, d.name()+"/")
+
+ for _, child := range d.children {
+ if child.isdir() {
+ child.(*dir).dump(indent + "\t")
+ } else {
+ fmt.Println(indent, child.name())
+ }
+
+ }
+}
+
+func (d *dir) String() string {
+ return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children)
+}
+
+// file stores actual data in the fs tree. It acts like an open, seekable file
+// where operations are conducted through ReadAt and WriteAt. Use it with
+// SectionReader for the best effect.
+type file struct {
+ common
+ data []byte
+}
+
+var _ node = &file{}
+
+func (f *file) isdir() bool {
+ return false
+}
+
+func (f *file) truncate() {
+ f.data = f.data[:0]
+}
+
+func (f *file) sectionReader(offset int64) io.Reader {
+ return io.NewSectionReader(f, offset, int64(len(f.data))-offset)
+}
+
+func (f *file) ReadAt(p []byte, offset int64) (n int, err error) {
+ return copy(p, f.data[offset:]), nil
+}
+
+func (f *file) WriteAt(p []byte, offset int64) (n int, err error) {
+ off := int(offset)
+ if cap(f.data) < off+len(p) {
+ data := make([]byte, len(f.data), off+len(p))
+ copy(data, f.data)
+ f.data = data
+ }
+
+ f.mod = time.Now()
+ f.data = f.data[:off+len(p)]
+
+ return copy(f.data[off:off+len(p)], p), nil
+}
+
+func (f *file) String() string {
+ return fmt.Sprintf("&file{path: %q}", f.p)
+}
+
+// common provides shared fields and methods for node implementations.
+type common struct {
+ p string
+ mod time.Time
+}
+
+func (c *common) name() string {
+ _, name := path.Split(c.p)
+ return name
+}
+
+func (c *common) path() string {
+ return c.p
+}
+
+func (c *common) modtime() time.Time {
+ return c.mod
+}
+
+func normalize(p string) string {
+ return "/" + strings.Trim(p, "/")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go
new file mode 100644
index 00000000..31c00afc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go
@@ -0,0 +1,119 @@
+// Package middleware - cloudfront wrapper for storage libs
+// N.B. currently only works with S3, not arbitrary sites
+//
+package middleware
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/AdRoll/goamz/cloudfront"
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+ storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
+)
+
+// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
+// constructs temporary signed CloudFront URLs from the storagedriver layer URL,
+// then issues HTTP Temporary Redirects to this CloudFront content URL.
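// Editor's note (illustrative, not part of the vendored file): in a registry
// configuration this middleware would typically be enabled with something
// like the following (option names per newCloudFrontStorageMiddleware below;
// the values are placeholders):
//
//   middleware:
//     storage:
//       - name: cloudfront
//         options:
//           baseurl: https://d111111abcdef8.cloudfront.net
//           privatekey: /path/to/pk-KEYPAIRID.pem
//           keypairid: KEYPAIRID
//           duration: 20m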
+type cloudFrontStorageMiddleware struct {
+ storagedriver.StorageDriver
+ cloudfront *cloudfront.CloudFront
+ duration time.Duration
+}
+
+var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
+
+// newCloudFrontStorageMiddleware constructs and returns a new CloudFront
+// storage middleware implementation.
+// Required options: baseurl, privatekey, keypairid
+func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+ base, ok := options["baseurl"]
+ if !ok {
+ return nil, fmt.Errorf("No baseurl provided")
+ }
+ baseURL, ok := base.(string)
+ if !ok {
+ return nil, fmt.Errorf("baseurl must be a string")
+ }
+ pk, ok := options["privatekey"]
+ if !ok {
+ return nil, fmt.Errorf("No privatekey provided")
+ }
+ pkPath, ok := pk.(string)
+ if !ok {
+ return nil, fmt.Errorf("privatekey must be a string")
+ }
+ kpid, ok := options["keypairid"]
+ if !ok {
+ return nil, fmt.Errorf("No keypairid provided")
+ }
+ keypairID, ok := kpid.(string)
+ if !ok {
+ return nil, fmt.Errorf("keypairid must be a string")
+ }
+
+ pkBytes, err := ioutil.ReadFile(pkPath)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read privatekey file: %s", err)
+ }
+
+ block, _ := pem.Decode([]byte(pkBytes))
+ if block == nil {
+ return nil, fmt.Errorf("Failed to decode private key as an rsa private key")
+ }
+ privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ cf := cloudfront.New(baseURL, privateKey, keypairID)
+
+ duration := 20 * time.Minute
+ d, ok := options["duration"]
+ if ok {
+ switch d := d.(type) {
+ case time.Duration:
+ duration = d
+ case string:
+ dur, err := time.ParseDuration(d)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid duration: %s", err)
+ }
+ duration = dur
+ }
+ }
+
+ return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil
+}
+
+// S3BucketKeyer is any type that is capable of returning the S3 bucket key
+// which should be cached by AWS CloudFront.
+type S3BucketKeyer interface {
+ S3BucketKey(path string) string
+}
+
+// URLFor attempts to return a signed CloudFront URL for the content at the
+// given path, falling back to the wrapped driver's URLFor when the backend
+// does not expose S3 bucket keys.
+func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ // TODO(endophage): currently only supports S3
+ keyer, ok := lh.StorageDriver.(S3BucketKeyer)
+ if !ok {
+ context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver")
+ return lh.StorageDriver.URLFor(ctx, path, options)
+ }
+
+ cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration))
+ if err != nil {
+ return "", err
+ }
+ return cfURL, nil
+}
+
+// init registers the cloudfront layerHandler backend.
+func init() { + storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go new file mode 100644 index 00000000..7e40a8dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go @@ -0,0 +1,39 @@ +package storagemiddleware + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// InitFunc is the type of a StorageMiddleware factory function and is +// used to register the constructor for different StorageMiddleware backends. +type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) + +var storageMiddlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a StorageMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if storageMiddlewares == nil { + storageMiddlewares = make(map[string]InitFunc) + } + if _, exists := storageMiddlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + storageMiddlewares[name] = initFunc + + return nil +} + +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { + if storageMiddlewares != nil { + if initFunc, exists := storageMiddlewares[name]; exists { + return initFunc(storageDriver, options) + } + } + + return nil, fmt.Errorf("no storage middleware registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go new file mode 100644 index 00000000..d1bc932f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go new file mode 100644 index 00000000..67215bc2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -0,0 +1,789 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. 
+// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// +build include_oss + +package oss + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/distribution/context" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyID string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + Endpoint string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct {
+ baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskeyid
+// - accesskeysecret
+// - region
+// - bucket
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+ // Unlike the s3 driver, the OSS credentials are required here; there is no
+ // instance-role fallback.
+ accessKey, ok := parameters["accesskeyid"]
+ if !ok {
+ return nil, fmt.Errorf("No accesskeyid parameter provided")
+ }
+ secretKey, ok := parameters["accesskeysecret"]
+ if !ok {
+ return nil, fmt.Errorf("No accesskeysecret parameter provided")
+ }
+
+ regionName, ok := parameters["region"]
+ if !ok || fmt.Sprint(regionName) == "" {
+ return nil, fmt.Errorf("No region parameter provided")
+ }
+
+ bucket, ok := parameters["bucket"]
+ if !ok || fmt.Sprint(bucket) == "" {
+ return nil, fmt.Errorf("No bucket parameter provided")
+ }
+
+ internalBool := false
+ internal, ok := parameters["internal"]
+ if ok {
+ internalBool, ok = internal.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The internal parameter should be a boolean")
+ }
+ }
+
+ encryptBool := false
+ encrypt, ok := parameters["encrypt"]
+ if ok {
+ encryptBool, ok = encrypt.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The encrypt parameter should be a boolean")
+ }
+ }
+
+ secureBool := true
+ secure, ok := parameters["secure"]
+ if ok {
+ secureBool, ok = secure.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The secure parameter should be a boolean")
+ }
+ }
+
+ chunkSize := int64(defaultChunkSize)
+ chunkSizeParam, ok := parameters["chunksize"]
+ if ok {
+ switch v := chunkSizeParam.(type) {
+ case string:
+ vv, err := strconv.ParseInt(v, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+ }
+ chunkSize = vv
+ case int64:
+ chunkSize = v
+ case int, uint, int32, uint32, uint64:
+ chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+ default:
+ return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+ }
+
+ if chunkSize < minChunkSize {
+ return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+ }
+ }
+
+ rootDirectory, ok := parameters["rootdirectory"]
+ if !ok {
+ rootDirectory = ""
+ }
+
+ endpoint, ok := parameters["endpoint"]
+ if !ok {
+ endpoint = ""
+ }
+
+ params := DriverParameters{
+ AccessKeyID: fmt.Sprint(accessKey),
+ AccessKeySecret: fmt.Sprint(secretKey),
+ Bucket: fmt.Sprint(bucket),
+ Region: oss.Region(fmt.Sprint(regionName)),
+ ChunkSize: chunkSize,
+ RootDirectory: fmt.Sprint(rootDirectory),
+ Encrypt: encryptBool,
+ Secure: secureBool,
+ Internal: internalBool,
+ Endpoint: fmt.Sprint(endpoint),
+ }
+
+ return New(params)
+}
+
+// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
+// bucketName
+func New(params DriverParameters) (*Driver, error) {
+
+ client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
+ client.SetEndpoint(params.Endpoint)
+ bucket := client.Bucket(params.Bucket)
+ client.SetDebug(false)
+
+ // Validate that the given credentials have at least read permissions in the
+ // given bucket scope.
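// Editor's note: the single-key List below is a cheap probe; it fails fast at
// construction time if the credentials cannot read the bucket, instead of
// surfacing auth errors on the first real request.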
+ if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
+ return nil, err
+ }
+
+ // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise
+ // if you initiated a new OSS client while another one is running on the same bucket.
+
+ d := &driver{
+ Client: client,
+ Bucket: bucket,
+ ChunkSize: params.ChunkSize,
+ Encrypt: params.Encrypt,
+ RootDirectory: params.RootDirectory,
+ zeros: make([]byte, params.ChunkSize),
+ }
+
+ d.pool.New = func() interface{} {
+ return make([]byte, d.ChunkSize)
+ }
+
+ return &Driver{
+ baseEmbed: baseEmbed{
+ Base: base.Base{
+ StorageDriver: d,
+ },
+ },
+ }, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+ return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+ content, err := d.Bucket.Get(d.ossPath(path))
+ if err != nil {
+ return nil, parseError(path, err)
+ }
+ return content, nil
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+ return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+ headers := make(http.Header)
+ headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")
+
+ resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers)
+ if err != nil {
+ return nil, parseError(path, err)
+ }
+
+ // Due to the Aliyun OSS API, status 200 and the whole object will be returned
+ // instead of an InvalidRange error when the range is invalid.
+ //
+ // The OSS server will always return http.StatusPartialContent if the range is acceptable.
+ if resp.StatusCode != http.StatusPartialContent {
+ resp.Body.Close()
+ return ioutil.NopCloser(bytes.NewReader(nil)), nil
+ }
+
+ return resp.Body, nil
+}
+
+// WriteStream stores the contents of the provided io.Reader at a
+// location designated by the given path. The driver will know it has
+// received the full contents when the reader returns io.EOF. The number
+// of successfully READ bytes will be returned, even if an error is
+// returned. May be used to resume writing a stream by providing a nonzero
+// offset. Offsets past the current size will write from the position
+// beyond the end of the file.
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+ partNumber := 1
+ bytesRead := 0
+ var putErrChan chan error
+ parts := []oss.Part{}
+ var part oss.Part
+ done := make(chan struct{}) // stopgap to free up waiting goroutines
+
+ multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions())
+ if err != nil {
+ return 0, err
+ }
+
+ buf := d.getbuf()
+
+ // We never want to leave a dangling multipart upload, our only consistent state is
+ // when there is a whole object at path. This is in order to remain consistent with
+ // the stat call.
+ //
+ // Note that if the machine dies before executing the defer, we will be left with a dangling
+ // multipart upload, which will eventually be cleaned up, but we will lose all of the progress
+ // made prior to the machine crashing.
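// Editor's note: the deferred function below first drains any in-flight part
// upload, then either completes the multipart upload or aborts it when
// Complete fails, so a partially written object is never left visible.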
+ defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the OSS + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying OSS library should handle it, it doesn't seem to + // be part of the shouldRetry function (see denverdino/aliyungo/oss). + defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + var err error + var part oss.Part + + part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout) + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. 
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.ossPath(path), nil) + if err != nil { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + 
return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && opath[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) + + err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), + d.getContentType(), + getPermissions(), + oss.Options{}) + if err != nil { + logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
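// Editor's note: deletion works in batches of up to listMax (1000) keys: each
// iteration lists keys under the prefix, issues one DelMulti call for the
// whole batch, and re-lists until no keys remain.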
+func (d *driver) Delete(ctx context.Context, path string) error {
+ listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax)
+ if err != nil || len(listResponse.Contents) == 0 {
+ return storagedriver.PathNotFoundError{Path: path}
+ }
+
+ ossObjects := make([]oss.Object, listMax)
+
+ for len(listResponse.Contents) > 0 {
+ for index, key := range listResponse.Contents {
+ ossObjects[index].Key = key.Key
+ }
+
+ err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]})
+ if err != nil {
+ return err
+ }
+
+ listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an UnsupportedMethodErr in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ methodString := "GET"
+ method, ok := options["method"]
+ if ok {
+ methodString, ok = method.(string)
+ if !ok || (methodString != "GET") {
+ return "", storagedriver.ErrUnsupportedMethod{}
+ }
+ }
+
+ expiresTime := time.Now().Add(20 * time.Minute)
+
+ expires, ok := options["expiry"]
+ if ok {
+ et, ok := expires.(time.Time)
+ if ok {
+ expiresTime = et
+ }
+ }
+ logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
+ testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
+ logrus.Infof("testURL: %s", testURL)
+ return testURL, nil
+}
+
+func (d *driver) ossPath(path string) string {
+ return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+func parseError(path string, err error) error {
+ if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") {
+ return storagedriver.PathNotFoundError{Path: path}
+ }
+
+ return err
+}
+
+func hasCode(err error, code string) bool {
+ ossErr, ok := err.(*oss.Error)
+ return ok && ossErr.Code == code
+}
+
+func (d *driver) getOptions() oss.Options {
+ return oss.Options{ServerSideEncryption: d.Encrypt}
+}
+
+func getPermissions() oss.ACL {
+ return oss.Private
+}
+
+func (d *driver) getContentType() string {
+ return "application/octet-stream"
+}
+
+// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
+func (d *driver) getbuf() []byte {
+ return d.pool.Get().([]byte)
+}
+
+func (d *driver) putbuf(p []byte) {
+ copy(p, d.zeros)
+ d.pool.Put(p)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go
new file mode 100644
index 00000000..655c68a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go
@@ -0,0 +1,3 @@
+// Package rados implements the rados storage driver backend. Support can be
+// enabled by including the "include_rados" build tag.
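// Editor's note (illustrative): because of the build tag, this driver is only
// compiled into a binary when the tag is passed explicitly, e.g.:
//
//   go build -tags include_rados ./...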
+package rados
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
new file mode 100644
index 00000000..c2be528e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
@@ -0,0 +1,632 @@
+// +build include_rados
+
+package rados
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "path"
+ "strconv"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+ "github.com/docker/distribution/registry/storage/driver/base"
+ "github.com/docker/distribution/registry/storage/driver/factory"
+ "github.com/docker/distribution/uuid"
+ "github.com/noahdesu/go-ceph/rados"
+)
+
+const driverName = "rados"
+
+// Prefix for all stored blob identifiers
+const objectBlobPrefix = "blob:"
+
+// Objects are striped into chunks of this size (4M)
+const defaultChunkSize = 4 << 20
+const defaultXattrTotalSizeName = "total-size"
+
+// Max number of keys fetched from omap at each read operation
+const defaultKeysFetched = 1
+
+//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
+type DriverParameters struct {
+ poolname string
+ username string
+ chunksize uint64
+}
+
+func init() {
+ factory.Register(driverName, &radosDriverFactory{})
+}
+
+// radosDriverFactory implements the factory.StorageDriverFactory interface
+type radosDriverFactory struct{}
+
+func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+ return FromParameters(parameters)
+}
+
+type driver struct {
+ Conn *rados.Conn
+ Ioctx *rados.IOContext
+ chunksize uint64
+}
+
+type baseEmbed struct {
+ base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS.
+// Objects are stored at absolute keys in the provided pool.
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - poolname: the ceph pool name +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + + pool, ok := parameters["poolname"] + if !ok { + return nil, fmt.Errorf("No poolname parameter provided") + } + + username, ok := parameters["username"] + if !ok { + username = "" + } + + chunksize := uint64(defaultChunkSize) + chunksizeParam, ok := parameters["chunksize"] + if ok { + chunksize, ok = chunksizeParam.(uint64) + if !ok { + return nil, fmt.Errorf("The chunksize parameter should be a number") + } + } + + params := DriverParameters{ + fmt.Sprint(pool), + fmt.Sprint(username), + chunksize, + } + + return New(params) +} + +// New constructs a new Driver +func New(params DriverParameters) (*Driver, error) { + var conn *rados.Conn + var err error + + if params.username != "" { + log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) + conn, err = rados.NewConnWithUser(params.username) + } else { + log.Infof("Opening connection to pool %s", params.poolname) + conn, err = rados.NewConn() + } + + if err != nil { + return nil, err + } + + err = conn.ReadDefaultConfigFile() + if err != nil { + return nil, err + } + + err = conn.Connect() + if err != nil { + return nil, err + } + + log.Infof("Connected") + + ioctx, err := conn.OpenIOContext(params.poolname) + + log.Infof("Connected to pool %s", params.poolname) + + if err != nil { + return nil, err + } + + d := &driver{ + Ioctx: ioctx, + Conn: conn, + chunksize: params.chunksize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
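// Editor's note (illustrative, not part of the vendored file): objects are
// striped into fixed-size chunks named "<oid>-<n>", and reads translate a
// byte offset into a (chunk, offset) pair via getChunkNameFromOffset, e.g.
// with the default 4M chunk size:
//
//   chunk, off := d.getChunkNameFromOffset("blob:1234", 9<<20)
//   // chunk == "blob:1234-2", off == 1<<20  (9M = 2*4M + 1M)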
+type readStreamReader struct {
+ driver *driver
+ oid string
+ size uint64
+ offset uint64
+}
+
+func (r *readStreamReader) Read(b []byte) (n int, err error) {
+ // Determine the part available to read
+ bufferOffset := uint64(0)
+ bufferSize := uint64(len(b))
+
+ // End of the object, read less than the buffer size
+ if bufferSize > r.size-r.offset {
+ bufferSize = r.size - r.offset
+ }
+
+ // Fill `b`
+ for bufferOffset < bufferSize {
+ // Get the offset in the object chunk
+ chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset)
+
+ // Determine the best size to read
+ bufferEndOffset := bufferSize
+ if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset {
+ bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset)
+ }
+
+ // Read the chunk
+ n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset)
+
+ if err != nil {
+ return int(bufferOffset), err
+ }
+
+ bufferOffset += uint64(n)
+ r.offset += uint64(n)
+ }
+
+ // EOF if the offset is at the end of the object
+ if r.offset == r.size {
+ return int(bufferOffset), io.EOF
+ }
+
+ return int(bufferOffset), nil
+}
+
+func (r *readStreamReader) Close() error {
+ return nil
+}
+
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+ // get oid from filename
+ oid, err := d.getOid(path)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // get object stat
+ stat, err := d.Stat(ctx, path)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if offset > stat.Size() {
+ return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
+ }
+
+ return &readStreamReader{
+ driver: d,
+ oid: oid,
+ size: uint64(stat.Size()),
+ offset: uint64(offset),
+ }, nil
+}
+
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+ buf := make([]byte, d.chunksize)
+ totalRead = 0
+
+ oid, err := d.getOid(path)
+ if err != nil {
+ switch err.(type) {
+ // Trying to write new object, generate new blob identifier for it
+ case storagedriver.PathNotFoundError:
+ oid = d.generateOid()
+ err = d.putOid(path, oid)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, err
+ }
+ } else {
+ // Check total object size only for existing ones
+ totalSize, err := d.getXattrTotalSize(ctx, oid)
+ if err != nil {
+ return 0, err
+ }
+
+ // If offset is after the current object size, fill the gap with zeros
+ for totalSize < uint64(offset) {
+ sizeToWrite := d.chunksize
+ if uint64(offset)-totalSize < sizeToWrite {
+ sizeToWrite = uint64(offset) - totalSize
+ }
+
+ chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize))
+ err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset))
+ if err != nil {
+ return totalRead, err
+ }
+
+ totalSize += sizeToWrite
+ }
+ }
+
+ // Writer
+ for {
+ // Align to chunk size
+ sizeRead := uint64(0)
+ sizeToRead := uint64(offset+totalRead) % d.chunksize
+ if sizeToRead == 0 {
+ sizeToRead = d.chunksize
+ }
+
+ // Read from `reader`
+ for sizeRead < sizeToRead {
+ nn, err := reader.Read(buf[sizeRead:sizeToRead])
+ sizeRead += uint64(nn)
+
+ if err != nil {
+ if err != io.EOF {
+ return totalRead, err
+ }
+
+ break
+ }
+ }
+
+ // End of file and nothing was read
+ if sizeRead == 0 {
+ break
+ }
+
+ // Write chunk object
+ chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead))
+ err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset))
+
+ if err != nil {
+ return totalRead, err
+ }
+ + // Update total object size as xattr in the first chunk of the object + err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) + if err != nil { + return totalRead, err + } + + totalRead += int64(sizeRead) + + // End of file + if sizeRead < sizeToRead { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // get oid from filename + oid, err := d.getOid(path) + + if err != nil { + return nil, err + } + + // the path is a virtual directory? + if oid == "" { + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: 0, + IsDir: true, + }, + }, nil + } + + // stat first chunk + stat, err := d.Ioctx.Stat(oid + "-0") + + if err != nil { + return nil, err + } + + // get total size of chunked object + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(totalSize), + ModTime: stat.ModTime, + }, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { + files, err := d.listDirectoryOid(dirPath) + + if err != nil { + return nil, storagedriver.PathNotFoundError{Path: dirPath} + } + + keys := make([]string, 0, len(files)) + for k := range files { + if k != dirPath { + keys = append(keys, path.Join(dirPath, k)) + } + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + // Get oid + oid, err := d.getOid(sourcePath) + + if err != nil { + return err + } + + // Move reference + err = d.putOid(destPath, oid) + + if err != nil { + return err + } + + // Delete old reference + err = d.deleteOid(sourcePath) + + if err != nil { + return err + } + + return nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, objectPath string) error { + // Get oid + oid, err := d.getOid(objectPath) + + if err != nil { + return err + } + + // Deleting virtual directory + if oid == "" { + objects, err := d.listDirectoryOid(objectPath) + if err != nil { + return err + } + + for object := range objects { + err = d.Delete(ctx, path.Join(objectPath, object)) + if err != nil { + return err + } + } + } else { + // Delete object chunks + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return err + } + + for offset := uint64(0); offset < totalSize; offset += d.chunksize { + chunkName, _ := d.getChunkNameFromOffset(oid, offset) + + err = d.Ioctx.Delete(chunkName) + if err != nil { + return err + } + } + + // Delete reference + err = d.deleteOid(objectPath) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ return "", storagedriver.ErrUnsupportedMethod{}
+}
+
+// Generate a blob identifier
+func (d *driver) generateOid() string {
+ return objectBlobPrefix + uuid.Generate().String()
+}
+
+// Reference an object and its hierarchy
+func (d *driver) putOid(objectPath string, oid string) error {
+ directory := path.Dir(objectPath)
+ base := path.Base(objectPath)
+ createParentReference := true
+
+ // After creating this reference, skip the parents referencing since the
+ // hierarchy already exists
+ if oid == "" {
+ firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+ if (err == nil) && (len(firstReference) > 0) {
+ createParentReference = false
+ }
+ }
+
+ oids := map[string][]byte{
+ base: []byte(oid),
+ }
+
+ // Reference object
+ err := d.Ioctx.SetOmap(directory, oids)
+ if err != nil {
+ return err
+ }
+
+ // Ensure parent virtual directories exist
+ if createParentReference {
+ return d.putOid(directory, "")
+ }
+
+ return nil
+}
+
+// Get the object identifier from an object name
+func (d *driver) getOid(objectPath string) (string, error) {
+ directory := path.Dir(objectPath)
+ base := path.Base(objectPath)
+
+ files, err := d.Ioctx.GetOmapValues(directory, "", base, 1)
+
+ if (err != nil) || (files[base] == nil) {
+ return "", storagedriver.PathNotFoundError{Path: objectPath}
+ }
+
+ return string(files[base]), nil
+}
+
+// List the objects of a virtual directory
+func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) {
+ return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched)
+}
+
+// Remove a file from the files hierarchy
+func (d *driver) deleteOid(objectPath string) error {
+ // Remove object reference
+ directory := path.Dir(objectPath)
+ base := path.Base(objectPath)
+ err := d.Ioctx.RmOmapKeys(directory, []string{base})
+
+ if err != nil {
+ return err
+ }
+
+ // Remove virtual directory if empty (no more references)
+ firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+
+ if err != nil {
+ return err
+ }
+
+ if len(firstReference) == 0 {
+ // Delete omap
+ err := d.Ioctx.Delete(directory)
+
+ if err != nil {
+ return err
+ }
+
+ // Remove reference on parent omaps
+ if directory != "" {
+ return d.deleteOid(directory)
+ }
+ }
+
+ return nil
+}
+
+// Takes an offset in a chunked object and returns the chunk name and a new
+// offset in this chunk object
+func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) {
+ chunkID := offset / d.chunksize
+ chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10)
+ chunkedOffset := offset % d.chunksize
+ return chunkedOid, chunkedOffset
+}
+
+// Set the total size of a chunked object `oid`
+func (d *driver) setXattrTotalSize(oid string, size uint64) error {
+ // Convert uint64 `size` to []byte
+ xattr := make([]byte, binary.MaxVarintLen64)
+ binary.LittleEndian.PutUint64(xattr, size)
+
+ // Save the total size as a xattr in the first chunk
+ return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+}
+
+// Get the total size of the chunked object `oid` stored as xattr
+func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) {
+ // Fetch xattr as []byte
+ xattr := make([]byte, binary.MaxVarintLen64)
+ xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+
+ if err != nil {
+ return 0, err
+ }
+
+ if xattrLength != len(xattr) {
+ context.GetLogger(ctx).Errorf("object %s
xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) + return 0, storagedriver.PathNotFoundError{Path: oid} + } + + // Convert []byte as uint64 + totalSize := binary.LittleEndian.Uint64(xattr) + + return totalSize, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go new file mode 100644 index 00000000..7bb23a85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go @@ -0,0 +1,829 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the AdRoll/goamz client library for interfacing with +// s3. +// +// Because s3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that s3 guarantees only eventual consistency, so do not assume +// that a successful write will mean immediate access to the data written (although +// in most regions a new object put has guaranteed read after write). The only true +// guarantee is that once you call Stat and receive a certain file size, that much of +// the file is already accessible. +package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/AdRoll/goamz/aws" + "github.com/AdRoll/goamz/s3" + "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
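// Editor's note (illustrative, not part of the vendored file): a minimal
// registry configuration for this driver might look like the following
// (values are placeholders; accesskey/secretkey may be omitted when using
// IAM instance credentials, per FromParameters below):
//
//   storage:
//     s3:
//       region: us-east-1
//       bucket: my-registry-bucket
//       accesskey: AKIAEXAMPLE
//       secretkey: SECRETEXAMPLE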
+type Driver struct {
+ baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskey
+// - secretkey
+// - region
+// - bucket
+// - encrypt
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+ // Providing no values for these is valid in case the user is authenticating
+ // with an IAM on an ec2 instance (in which case the instance credentials will
+ // be summoned when GetAuth is called)
+ accessKey, ok := parameters["accesskey"]
+ if !ok {
+ accessKey = ""
+ }
+ secretKey, ok := parameters["secretkey"]
+ if !ok {
+ secretKey = ""
+ }
+
+ regionName, ok := parameters["region"]
+ if !ok || fmt.Sprint(regionName) == "" {
+ return nil, fmt.Errorf("No region parameter provided")
+ }
+ region := aws.GetRegion(fmt.Sprint(regionName))
+ if region.Name == "" {
+ return nil, fmt.Errorf("Invalid region provided: %v", region)
+ }
+
+ bucket, ok := parameters["bucket"]
+ if !ok || fmt.Sprint(bucket) == "" {
+ return nil, fmt.Errorf("No bucket parameter provided")
+ }
+
+ encryptBool := false
+ encrypt, ok := parameters["encrypt"]
+ if ok {
+ encryptBool, ok = encrypt.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The encrypt parameter should be a boolean")
+ }
+ }
+
+ secureBool := true
+ secure, ok := parameters["secure"]
+ if ok {
+ secureBool, ok = secure.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The secure parameter should be a boolean")
+ }
+ }
+
+ v4AuthBool := false
+ v4Auth, ok := parameters["v4auth"]
+ if ok {
+ v4AuthBool, ok = v4Auth.(bool)
+ if !ok {
+ return nil, fmt.Errorf("The v4auth parameter should be a boolean")
+ }
+ }
+
+ chunkSize := int64(defaultChunkSize)
+ chunkSizeParam, ok := parameters["chunksize"]
+ if ok {
+ switch v := chunkSizeParam.(type) {
+ case string:
+ vv, err := strconv.ParseInt(v, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+ }
+ chunkSize = vv
+ case int64:
+ chunkSize = v
+ case int, uint, int32, uint32, uint64:
+ chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+ default:
+ return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+ }
+
+ if chunkSize < minChunkSize {
+ return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+ }
+ }
+
+ rootDirectory, ok := parameters["rootdirectory"]
+ if !ok {
+ rootDirectory = ""
+ }
+
+ params := DriverParameters{
+ fmt.Sprint(accessKey),
+ fmt.Sprint(secretKey),
+ fmt.Sprint(bucket),
+ region,
+ encryptBool,
+ secureBool,
+ v4AuthBool,
+ chunkSize,
+ fmt.Sprint(rootDirectory),
+ }
+
+ return New(params)
+}
+
+// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
+// bucketName
+func New(params DriverParameters) (*Driver, error) {
+ auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
+ if err != nil {
+ return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
+ }
+
+ if !params.Secure {
+ params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
+ }
+
+ s3obj := s3.New(auth, params.Region)
+ bucket := s3obj.Bucket(params.Bucket)
+
+ if params.V4Auth {
+ s3obj.Signature = aws.V4Signature
+ } else {
+ if params.Region.Name == "eu-central-1" {
+ return nil, fmt.Errorf("The eu-central-1 region only works with v4
authentication") + } + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []s3.Part{} + var part s3.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines + + multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. 
+ defer func() {
+ if putErrChan != nil {
+ if putErr := <-putErrChan; putErr != nil {
+ err = putErr
+ }
+ }
+
+ if len(parts) > 0 {
+ if multi == nil {
+ // Parts should be empty if the multi is not initialized
+ panic("Unreachable")
+ } else {
+ if multi.Complete(parts) != nil {
+ multi.Abort()
+ }
+ }
+ }
+
+ d.putbuf(buf) // needs to be here to pick up new buf value
+ close(done) // free up any waiting goroutines
+ }()
+
+ // Fills from 0 to total from current
+ fromSmallCurrent := func(total int64) error {
+ current, err := d.ReadStream(ctx, path, 0)
+ if err != nil {
+ return err
+ }
+
+ bytesRead = 0
+ for int64(bytesRead) < total {
+ //The loop should very rarely enter a second iteration
+ nn, err := current.Read(buf[bytesRead:total])
+ bytesRead += nn
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+
+ break
+ }
+
+ }
+ return nil
+ }
+
+ // Fills from parameter to chunkSize from reader
+ fromReader := func(from int64) error {
+ bytesRead = 0
+ for from+int64(bytesRead) < d.ChunkSize {
+ nn, err := reader.Read(buf[from+int64(bytesRead):])
+ totalRead += int64(nn)
+ bytesRead += nn
+
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+
+ break
+ }
+ }
+
+ if putErrChan == nil {
+ putErrChan = make(chan error)
+ } else {
+ if putErr := <-putErrChan; putErr != nil {
+ putErrChan = nil
+ return putErr
+ }
+ }
+
+ go func(bytesRead int, from int64, buf []byte) {
+ defer d.putbuf(buf) // this buffer gets dropped after this call
+
+ // DRAGONS(stevvooe): There are few things one might want to know
+ // about this section. First, the putErrChan is expecting an error
+ // and a nil or just a nil to come through the channel. This is
+ // covered by the silly defer below. The other aspect is the s3
+ // retry backoff to deal with RequestTimeout errors. Even though
+ // the underlying s3 library should handle it, it doesn't seem to
+ // be part of the shouldRetry function (see AdRoll/goamz/s3).
+ defer func() {
+ select {
+ case putErrChan <- nil: // for some reason, we do this no matter what.
+ case <-done:
+ return // ensure we don't leak the goroutine
+ }
+ }()
+
+ if bytesRead <= 0 {
+ return
+ }
+
+ var err error
+ var part s3.Part
+
+ loop:
+ for retries := 0; retries < 5; retries++ {
+ part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
+ if err == nil {
+ break // success!
+ }
+
+ // NOTE(stevvooe): This retry code tries to only retry under
+ // conditions where the s3 package does not. We may add s3
+ // error codes to the below if we see others bubble up in the
+ // application. Right now, the most troubling is
+ // RequestTimeout, which seems to only be triggered when a tcp
+ // connection to s3 slows to a crawl. If the RequestTimeout
+ // ends up getting added to the s3 library and we don't see
+ // other errors, this retry loop can be removed.
+ switch err := err.(type) {
+ case *s3.Error:
+ switch err.Code {
+ case "RequestTimeout":
+ // allow retries on only this error.
+ default:
+ break loop
+ }
+ }
+
+ backoff := 100 * time.Millisecond * time.Duration(retries+1)
+ logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
+ time.Sleep(backoff)
+ }
+
+ if err != nil {
+ logrus.Errorf("error putting part, aborting: %v", err)
+ select {
+ case putErrChan <- err:
+ case <-done:
+ return // don't leak the goroutine
+ }
+ }
+
+ // parts and partNumber are safe, because this function is the
+ // only one modifying them and we force it to be executed
+ // serially.
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.s3Path(path), nil) + if err != nil { + if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), + s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *driver) Delete(ctx context.Context, path string) error {
+	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	s3Objects := make([]s3.Object, listMax)
+
+	for len(listResponse.Contents) > 0 {
+		for index, key := range listResponse.Contents {
+			s3Objects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
+		if err != nil {
+			return err
+		}
+
+		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	methodString := "GET"
+	method, ok := options["method"]
+	if ok {
+		methodString, ok = method.(string)
+		if !ok || (methodString != "GET" && methodString != "HEAD") {
+			return "", storagedriver.ErrUnsupportedMethod{}
+		}
+	}
+
+	expiresTime := time.Now().Add(20 * time.Minute)
+	expires, ok := options["expiry"]
+	if ok {
+		et, ok := expires.(time.Time)
+		if ok {
+			expiresTime = et
+		}
+	}
+
+	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
+}
+
+func (d *driver) s3Path(path string) string {
+	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+// S3BucketKey returns the s3 bucket key for the given storage driver path.
+func (d *Driver) S3BucketKey(path string) string {
+	return d.StorageDriver.(*driver).s3Path(path)
+}
+
+func parseError(path string, err error) error {
+	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return err
+}
+
+func hasCode(err error, code string) bool {
+	s3err, ok := err.(*aws.Error)
+	return ok && s3err.Code == code
+}
+
+func (d *driver) getOptions() s3.Options {
+	return s3.Options{SSE: d.Encrypt}
+}
+
+func getPermissions() s3.ACL {
+	return s3.Private
+}
+
+func (d *driver) getContentType() string {
+	return "application/octet-stream"
+}
+
+// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
+func (d *driver) getbuf() []byte {
+	return d.pool.Get().([]byte)
+}
+
+func (d *driver) putbuf(p []byte) {
+	copy(p, d.zeros)
+	d.pool.Put(p)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go
new file mode 100644
index 00000000..dc8bdc8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go
@@ -0,0 +1,144 @@
+package driver
+
+import (
+	"fmt"
+	"io"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/docker/distribution/context"
+)
+
+// Version is a string representing the storage driver version, of the form
+// Major.Minor.
+// The registry must accept storage drivers with equal major version and greater
+// minor version, but may not be compatible with older storage driver versions.
+type Version string
+
+// Major returns the major (primary) component of a version.
+func (version Version) Major() uint {
+	majorPart := strings.Split(string(version), ".")[0]
+	major, _ := strconv.ParseUint(majorPart, 10, 0)
+	return uint(major)
+}
+
+// Minor returns the minor (secondary) component of a version.
+func (version Version) Minor() uint {
+	minorPart := strings.Split(string(version), ".")[1]
+	minor, _ := strconv.ParseUint(minorPart, 10, 0)
+	return uint(minor)
+}
+
+// CurrentVersion is the current storage driver Version.
+const CurrentVersion Version = "0.1"
+
+// StorageDriver defines methods that a Storage Driver must implement for a
+// filesystem-like key/value object storage.
+type StorageDriver interface {
+	// Name returns the human-readable "name" of the driver, useful in error
+	// messages and logging. By convention, this will just be the registration
+	// name, but drivers may provide other information here.
+	Name() string
+
+	// GetContent retrieves the content stored at "path" as a []byte.
+	// This should primarily be used for small objects.
+	GetContent(ctx context.Context, path string) ([]byte, error)
+
+	// PutContent stores the []byte content at a location designated by "path".
+	// This should primarily be used for small objects.
+	PutContent(ctx context.Context, path string, content []byte) error
+
+	// ReadStream retrieves an io.ReadCloser for the content stored at "path"
+	// with a given byte offset.
+	// May be used to resume reading a stream by providing a nonzero offset.
+	ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
+
+	// WriteStream stores the contents of the provided io.Reader at a
+	// location designated by the given path.
+	// May be used to resume writing a stream by providing a nonzero offset.
+	// The offset must be no larger than the current size for this path.
+	WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error)
+
+	// Stat retrieves the FileInfo for the given path, including the current
+	// size in bytes and the creation time.
+	Stat(ctx context.Context, path string) (FileInfo, error)
+
+	// List returns a list of the objects that are direct descendants of the
+	// given path.
+	List(ctx context.Context, path string) ([]string, error)
+
+	// Move moves an object stored at sourcePath to destPath, removing the
+	// original object.
+	// Note: This may be no more efficient than a copy followed by a delete for
+	// many implementations.
+	Move(ctx context.Context, sourcePath string, destPath string) error
+
+	// Delete recursively deletes all objects stored at "path" and its subpaths.
+	Delete(ctx context.Context, path string) error
+
+	// URLFor returns a URL which may be used to retrieve the content stored at
+	// the given path, possibly using the given options.
+	// May return an ErrUnsupportedMethod in certain StorageDriver
+	// implementations.
+	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
+}
+
+// PathRegexp is the regular expression which each file path must match. A
+// file path is absolute, beginning with a slash and containing a positive
+// number of path components separated by slashes, where each component is
+// restricted to alphanumeric characters or a period, underscore, or
+// hyphen.
+var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
+
+// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method.
+type ErrUnsupportedMethod struct {
+	DriverName string
+}
+
+func (err ErrUnsupportedMethod) Error() string {
+	return fmt.Sprintf("%s: unsupported method", err.DriverName)
+}
+
+// PathNotFoundError is returned when operating on a nonexistent path.
+type PathNotFoundError struct {
+	Path       string
+	DriverName string
+}
+
+func (err PathNotFoundError) Error() string {
+	return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path)
+}
+
+// InvalidPathError is returned when the provided path is malformed.
+type InvalidPathError struct {
+	Path       string
+	DriverName string
+}
+
+func (err InvalidPathError) Error() string {
+	return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path)
+}
+
+// InvalidOffsetError is returned when attempting to read or write from an
+// invalid offset.
+type InvalidOffsetError struct {
+	Path       string
+	Offset     int64
+	DriverName string
+}
+
+func (err InvalidOffsetError) Error() string {
+	return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path)
+}
+
+// Error is a catch-all error type which captures an error string and
+// the driver type on which it occurred.
+type Error struct {
+	DriverName string
+	Enclosed   error
+}
+
+func (err Error) Error() string {
+	return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go
new file mode 100644
index 00000000..86bce794
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go
@@ -0,0 +1,812 @@
+// Package swift provides a storagedriver.StorageDriver implementation to
+// store blobs in Openstack Swift object storage.
+//
+// This package leverages the ncw/swift client library for interfacing with
+// Swift.
+//
+// It supports both TempAuth authentication and Keystone authentication
+// (up to version 3).
+//
+// As Swift has a limit on the size of a single uploaded object (by default
+// this is 5GB), the driver makes use of the Swift Large Object Support
+// (http://docs.openstack.org/developer/swift/overview_large_objects.html).
+// Only one container is used for both manifests and data objects. Manifests
+// are stored in the 'files' pseudo directory, data objects are stored under
+// 'segments'.
+package swift + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/ncw/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" +) + +const driverName = "swift" + +// defaultChunkSize defines the default size of a segment +const defaultChunkSize = 20 * 1024 * 1024 + +// minChunkSize defines the minimum size of a segment +const minChunkSize = 1 << 20 + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + TrustID string + Region string + Container string + Prefix string + InsecureSkipVerify bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +// swiftInfo maps the JSON structure returned by Swift /info endpoint +type swiftInfo struct { + Swift struct { + Version string `mapstructure:"version"` + } + Tempurl struct { + Methods []string `mapstructure:"methods"` + } +} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. 
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - username
+// - password
+// - authurl
+// - container
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	params := Parameters{
+		ChunkSize:          defaultChunkSize,
+		InsecureSkipVerify: false,
+	}
+
+	if err := mapstructure.Decode(parameters, &params); err != nil {
+		return nil, err
+	}
+
+	if params.Username == "" {
+		return nil, fmt.Errorf("No username parameter provided")
+	}
+
+	if params.Password == "" {
+		return nil, fmt.Errorf("No password parameter provided")
+	}
+
+	if params.AuthURL == "" {
+		return nil, fmt.Errorf("No authurl parameter provided")
+	}
+
+	if params.Container == "" {
+		return nil, fmt.Errorf("No container parameter provided")
+	}
+
+	if params.ChunkSize < minChunkSize {
+		return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize)
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given Openstack Swift credentials and container name
+func New(params Parameters) (*Driver, error) {
+	transport := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		MaxIdleConnsPerHost: 2048,
+		TLSClientConfig:     &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify},
+	}
+
+	ct := swift.Connection{
+		UserName:       params.Username,
+		ApiKey:         params.Password,
+		AuthUrl:        params.AuthURL,
+		Region:         params.Region,
+		UserAgent:      "distribution/" + version.Version,
+		Tenant:         params.Tenant,
+		TenantId:       params.TenantID,
+		Domain:         params.Domain,
+		DomainId:       params.DomainID,
+		TrustId:        params.TrustID,
+		Transport:      transport,
+		ConnectTimeout: 60 * time.Second,
+		Timeout:        15 * 60 * time.Second,
+	}
+	err := ct.Authenticate()
+	if err != nil {
+		return nil, fmt.Errorf("Swift authentication failed: %s", err)
+	}
+
+	if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound {
+		if err := ct.ContainerCreate(params.Container, nil); err != nil {
+			return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err)
+	}
+
+	d := &driver{
+		Conn:           ct,
+		Container:      params.Container,
+		Prefix:         params.Prefix,
+		ChunkSize:      params.ChunkSize,
+		TempURLMethods: make([]string, 0),
+		AccessKey:      params.AccessKey,
+	}
+
+	info := swiftInfo{}
+	if config, err := d.Conn.QueryInfo(); err == nil {
+		_, d.BulkDeleteSupport = config["bulk_delete"]
+
+		if err := mapstructure.Decode(config, &info); err == nil {
+			d.TempURLContainerKey = info.Swift.Version >= "2.3.0"
+			d.TempURLMethods = info.Tempurl.Methods
+		}
+	} else {
+		d.TempURLContainerKey = params.TempURLContainerKey
+		d.TempURLMethods = params.TempURLMethods
+	}
+
+	if len(d.TempURLMethods) > 0 {
+		secretKey := params.SecretKey
+		if secretKey == "" {
+			secretKey, _ = generateSecret()
+		}
+
+		// Since Swift 2.2.2, we can now set secret keys on containers
+		// in addition to the account secret keys. Use them in preference.
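+		// For reference, the OpenStack tempurl middleware computes the
+		// signature over the key stored here roughly as (a sketch for
+		// orientation; ncw/swift's ObjectTempUrl, used by URLFor below,
+		// does this for us):
+		//
+		//	sig = HMAC-SHA1(key, method + "\n" + expires + "\n" + path)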
+		if d.TempURLContainerKey {
+			_, containerHeaders, err := d.Conn.Container(d.Container)
+			if err != nil {
+				return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err)
+			}
+
+			d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"]
+			if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
+				m := swift.Metadata{}
+				m["temp-url-key"] = secretKey
+				if err := d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil {
+					d.SecretKey = secretKey
+				}
+			}
+		} else {
+			// Use the account secret key
+			_, accountHeaders, err := d.Conn.Account()
+			if err != nil {
+				return nil, fmt.Errorf("Failed to fetch account info (%s)", err)
+			}
+
+			d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"]
+			if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
+				m := swift.Metadata{}
+				m["temp-url-key"] = secretKey
+				if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil {
+					d.SecretKey = secretKey
+				}
+			}
+		}
+	}
+
+	return &Driver{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: d,
+			},
+		},
+	}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path))
+	if err == swift.ObjectNotFound {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+	return content, err
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+	err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType())
+	if err == swift.ObjectNotFound {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+	return err
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	headers := make(swift.Headers)
+	headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-"
+
+	file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers)
+	if err == swift.ObjectNotFound {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+	if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+		return ioutil.NopCloser(bytes.NewReader(nil)), nil
+	}
+	return file, err
+}
+
+// WriteStream stores the contents of the provided io.Reader at a
+// location designated by the given path. The driver will know it has
+// received the full contents when the reader returns io.EOF. The number
+// of successfully READ bytes will be returned, even if an error is
+// returned. May be used to resume writing a stream by providing a nonzero
+// offset. Offsets past the current size will write from the position
+// beyond the end of the file.
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) {
+	var (
+		segments      []swift.Object
+		multi         io.Reader
+		paddingReader io.Reader
+		currentLength int64
+		cursor        int64
+		segmentPath   string
+	)
+
+	partNumber := 1
+	chunkSize := int64(d.ChunkSize)
+	zeroBuf := make([]byte, d.ChunkSize)
+	hash := md5.New()
+
+	getSegment := func() string {
+		return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
+	}
+
+	max := func(a int64, b int64) int64 {
+		if a > b {
+			return a
+		}
+		return b
+	}
+
+	createManifest := true
+	info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path))
+	if err == nil {
+		manifest, ok := headers["X-Object-Manifest"]
+		if !ok {
+			if segmentPath, err = d.swiftSegmentPath(path); err != nil {
+				return 0, err
+			}
+			if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil {
+				return 0, err
+			}
+			segments = append(segments, info)
+		} else {
+			_, segmentPath = parseManifest(manifest)
+			if segments, err = d.getAllSegments(segmentPath); err != nil {
+				return 0, err
+			}
+			createManifest = false
+		}
+		currentLength = info.Bytes
+	} else if err == swift.ObjectNotFound {
+		if segmentPath, err = d.swiftSegmentPath(path); err != nil {
+			return 0, err
+		}
+	} else {
+		return 0, err
+	}
+
+	// First, we skip the existing segments that are not modified by this call
+	for i := range segments {
+		if offset < cursor+segments[i].Bytes {
+			break
+		}
+		cursor += segments[i].Bytes
+		hash.Write([]byte(segments[i].Hash))
+		partNumber++
+	}
+
+	// We reached the end of the file but we haven't reached 'offset' yet
+	// Therefore we add blocks of zeros
+	if offset >= currentLength {
+		for offset-currentLength >= chunkSize {
+			// Insert a block of zeros
+			headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil)
+			if err != nil {
+				if err == swift.ObjectNotFound {
+					return 0, storagedriver.PathNotFoundError{Path: getSegment()}
+				}
+				return 0, err
+			}
+			currentLength += chunkSize
+			partNumber++
+			hash.Write([]byte(headers["Etag"]))
+		}
+
+		cursor = currentLength
+		paddingReader = bytes.NewReader(zeroBuf)
+	} else if offset-cursor > 0 {
+		// Offset is inside the current segment: we need to read the
+		// data from the beginning of the segment to offset
+		file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil)
+		if err != nil {
+			if err == swift.ObjectNotFound {
+				return 0, storagedriver.PathNotFoundError{Path: getSegment()}
+			}
+			return 0, err
+		}
+		defer file.Close()
+		paddingReader = file
+	}
+
+	readers := []io.Reader{}
+	if paddingReader != nil {
+		readers = append(readers, io.LimitReader(paddingReader, offset-cursor))
+	}
+	readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor)))
+	multi = io.MultiReader(readers...)
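+	// At this point `multi` yields exactly the bytes belonging to the
+	// segment currently being (re)written: first any prefix carried over
+	// from the existing data, then caller data capped at the segment
+	// boundary. writeSegment below consumes it one segment at a time.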
+ + writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { + currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + } + return false, bytesRead, err + } + + segmentHash := md5.New() + writer := io.MultiWriter(currentSegment, segmentHash) + + n, err := io.Copy(writer, multi) + if err != nil { + return false, bytesRead, err + } + + if n > 0 { + defer func() { + closeError := currentSegment.Close() + if err != nil { + err = closeError + } + hexHash := hex.EncodeToString(segmentHash.Sum(nil)) + hash.Write([]byte(hexHash)) + }() + bytesRead += n - max(0, offset-cursor) + } + + if n < chunkSize { + // We wrote all the data + if cursor+n < currentLength { + // Copy the end of the chunk + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + _, copyErr := io.Copy(writer, file) + + if err := file.Close(); err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + if copyErr != nil { + return false, bytesRead, copyErr + } + } + + return true, bytesRead, nil + } + + multi = io.LimitReader(reader, chunkSize) + cursor += chunkSize + partNumber++ + + return false, bytesRead, nil + } + + finished := false + read := int64(0) + bytesRead := int64(0) + for finished == false { + finished, read, err = writeSegment(getSegment()) + bytesRead += read + if err != nil { + return bytesRead, err + } + } + + for ; partNumber < len(segments); partNumber++ { + hash.Write([]byte(segments[partNumber].Hash)) + } + + if createManifest { + if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { + return 0, err + } + } + + expectedHash := hex.EncodeToString(hash.Sum(nil)) + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + for { + var infos swift.Object + if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { + if strings.Trim(infos.Hash, "\"") == expectedHash { + return bytesRead, nil + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return bytesRead, err +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // On Swift 1.12, the 'bytes' field is always 0 + // so we need to do a second HEAD request + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + } + + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + _, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + objects = append(objects, segments...) 
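+				// Note: appending grows `objects` for the delete passes
+				// below; this range does not revisit the new entries
+				// (range evaluates the slice header once), which is fine
+				// because segments are plain objects, not manifests that
+				// would need further expansion.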
+ } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + if d.BulkDeleteSupport && len(objects) > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + _, err = d.Conn.BulkDelete(d.Container, filenames) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. + if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else { + for _, obj := range objects { + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + if d.SecretKey == "" { + return "", storagedriver.ErrUnsupportedMethod{} + } + + methodString := "GET" + method, ok := options["method"] + if ok { + if methodString, ok = method.(string); !ok { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + if methodString == "HEAD" { + // A "HEAD" request on a temporary URL is allowed if the + // signature was generated with "GET", "POST" or "PUT" + methodString = "GET" + } + + supported := false + for _, method := range d.TempURLMethods { + if method == methodString { + supported = true + break + } + } + + if !supported { + return "", storagedriver.ErrUnsupportedMethod{} + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) + + if d.AccessKey != "" { + // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature + url, _ := url.Parse(tempURL) + query := url.Query() + query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) + url.RawQuery = query.Encode() + tempURL = url.String() + } + + return tempURL, nil +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err == 
swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return segments, err +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func generateSecret() (string, error) { + var secretBytes [32]byte + if _, err := rand.Read(secretBytes[:]); err != nil { + return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) + } + return hex.EncodeToString(secretBytes[:]), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go new file mode 100644 index 00000000..6fea2def --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -0,0 +1,1204 @@ +package testsuites + +import ( + "bytes" + "crypto/sha1" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "gopkg.in/check.v1" +) + +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +// RegisterSuite registers an in-process storage driver test suite with +// the go test runner. +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { + check.Suite(&DriverSuite{ + Constructor: driverConstructor, + SkipCheck: skipCheck, + ctx: context.Background(), + }) +} + +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. +type SkipCheck func() (reason string) + +// NeverSkip is a default SkipCheck which never skips the suite. +var NeverSkip SkipCheck = func() string { return "" } + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + SkipCheck + storagedriver.StorageDriver + ctx context.Context +} + +// SetUpSuite sets up the gocheck test suite. 
+func (suite *DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } + d, err := suite.Constructor() + c.Assert(err, check.IsNil) + suite.StorageDriver = d +} + +// TearDownSuite tears down the gocheck test suite. +func (suite *DriverSuite) TearDownSuite(c *check.C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, check.IsNil) + } +} + +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists(c *check.C) { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + c.Fatalf(`the root path "/" should always exist: %v`, err) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. +func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. +func (suite *DriverSuite) TestWriteRead2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. +func (suite *DriverSuite) TestWriteRead3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. 
+func (suite *DriverSuite) TestWriteRead4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. +func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. 
+func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
+	if testing.Short() {
+		c.Skip("Skipping test in short mode")
+	}
+
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	checksum := sha1.New()
+	var fileSize int64 = 5 * 1024 * 1024 * 1024
+
+	contents := newRandReader(fileSize)
+	written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum))
+	c.Assert(err, check.IsNil)
+	c.Assert(written, check.Equals, fileSize)
+
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	writtenChecksum := sha1.New()
+	io.Copy(writtenChecksum, reader)
+
+	c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
+}
+
+// TestReadStreamWithOffset tests that the appropriate data is streamed when
+// reading with a given offset.
+func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	chunkSize := int64(32)
+
+	contentsChunk1 := randomContents(chunkSize)
+	contentsChunk2 := randomContents(chunkSize)
+	contentsChunk3 := randomContents(chunkSize)
+
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+	c.Assert(err, check.IsNil)
+
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err := ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contentsChunk3)
+
+	// Ensure we get an invalid offset error for negative offsets.
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1)
+	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
+	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
+	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
+	c.Assert(reader, check.IsNil)
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+
+	// Read past the end of the content and make sure we get a reader that
+	// returns 0 bytes and io.EOF
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	buf := make([]byte, chunkSize)
+	n, err := reader.Read(buf)
+	c.Assert(err, check.Equals, io.EOF)
+	c.Assert(n, check.Equals, 0)
+
+	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	n, err = reader.Read(buf)
+	c.Assert(n, check.Equals, 1)
+
+	// We don't care whether the io.EOF comes on this read or the first
+	// zero read, but the only error acceptable here is io.EOF.
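+	// (io.Reader explicitly permits returning n > 0 with err == nil and
+	// reporting io.EOF only on the following call, so both behaviors
+	// are conforming.)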
+ if err != nil { + c.Assert(err, check.Equals, io.EOF) + } + + // Any more reads should result in zero bytes and io.EOF + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 0) + c.Assert(err, check.Equals, io.EOF) +} + +// TestContinueStreamAppendLarge tests that a stream write can be appended to without +// corrupting the data with a large chunk size. +func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { + suite.testContinueStreamAppend(c, int64(10*1024*1024)) +} + +// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only +// with a tiny chunk size in order to test corner cases for some cloud storage drivers. +func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { + suite.testContinueStreamAppend(c, int64(32)) +} + +func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { + filename := randomPath(32) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + contentsChunk4 := randomContents(chunkSize) + zeroChunk := make([]byte, int64(chunkSize)) + + fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) + + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk1))) + + fi, err := suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) + + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, 2*chunkSize) + + // Test re-writing the last chunk + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, 2*chunkSize) + + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) + + // Writing past size of file extends file (no offset error). We would like + // to write chunk 4 one chunk length past chunk 3. It should be successful + // and the resulting file will be 5 chunks long, with a chunk of all + // zeros. + + fullContents = append(fullContents, zeroChunk...) + fullContents = append(fullContents, contentsChunk4...) 
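+	// Resulting layout, as a quick check of the offsets used below
+	// (x = chunkSize):
+	//
+	//	[chunk1][chunk2][chunk3][ zeros ][chunk4]
+	//	0       1x      2x      3x      4x      5x
+	//
+	// so the append of chunk4 lands at len(fullContents)-chunkSize == 4x,
+	// one chunk past the current 3x end of the file.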
+ + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, chunkSize) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) + + received, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(len(received), check.Equals, len(fullContents)) + c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) + c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) + c.Assert(received, check.DeepEquals, fullContents) + + // Ensure that negative offsets return correct error. + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. +func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) + + doesnotexist := path.Join(rootDirectory, "nonexistent") + _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) + c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ + Path: doesnotexist, + DriverName: suite.StorageDriver.Name(), + }) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. 
Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. +func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.StorageDriver.Delete(suite.ctx, "/notadir") + + // Now try to move a non-existent file under it. 
+	err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar")
+	c.Assert(err, check.NotNil)
+}
+
+// TestDelete checks that the delete operation removes data from the storage
+// driver.
+func (suite *DriverSuite) TestDelete(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(suite.ctx, filename)
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+}
+
+// TestURLFor checks that the URLFor method functions properly, but only if it
+// is implemented.
+func (suite *DriverSuite) TestURLFor(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil)
+	if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
+		return
+	}
+	c.Assert(err, check.IsNil)
+
+	response, err := http.Get(url)
+	c.Assert(err, check.IsNil)
+	defer response.Body.Close()
+
+	read, err := ioutil.ReadAll(response.Body)
+	c.Assert(err, check.IsNil)
+	c.Assert(read, check.DeepEquals, contents)
+
+	url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"})
+	if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
+		return
+	}
+	c.Assert(err, check.IsNil)
+
+	response, err = http.Head(url)
+	c.Assert(err, check.IsNil)
+	defer response.Body.Close()
+	c.Assert(response.StatusCode, check.Equals, 200)
+	c.Assert(response.ContentLength, check.Equals, int64(32))
+}
+
+// TestDeleteNonexistent checks that removing a nonexistent key fails.
+func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) {
+	filename := randomPath(32)
+	err := suite.StorageDriver.Delete(suite.ctx, filename)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+}
+
+// TestDeleteFolder checks that deleting a folder removes all child elements.
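+// Schematically, the tree exercised is (paths are random at runtime; this
+// layout is only an illustration):
+//
+//	<dir>/<file1>  <dir>/<file2>  <dir>/<file3>
+//
+// file1 is deleted individually first, then the whole <dir> prefix, after
+// which all three paths must report PathNotFoundError.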
+func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
+	dirname := randomPath(32)
+	filename1 := randomPath(32)
+	filename2 := randomPath(32)
+	filename3 := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname))
+
+	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(suite.ctx, dirname)
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+}
+
+// TestStatCall verifies the implementation of the storagedriver's Stat call.
+func (suite *DriverSuite) TestStatCall(c *check.C) {
+	content := randomContents(4096)
+	dirPath := randomPath(32)
+	fileName := randomFilename(32)
+	filePath := path.Join(dirPath, fileName)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath))
+
+	// Call on non-existent file/dir, check error.
+	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+	c.Assert(fi, check.IsNil)
+
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
+	c.Assert(fi, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+	c.Assert(err, check.IsNil)
+
+	// Call on regular file, check results
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, filePath)
+	c.Assert(fi.Size(), check.Equals, int64(len(content)))
+	c.Assert(fi.IsDir(), check.Equals, false)
+	createdTime := fi.ModTime()
+
+	// Sleep and modify the file
+	time.Sleep(time.Second * 10)
+	content = randomContents(4096)
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
+	c.Assert(err, check.IsNil)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
+
+	// Check that the modification time is after the creation time.
+	// Cloud storage frontend nodes may have some time drift between them,
+	// but the sleeps before and after the update should absorb that.
+	modTime := fi.ModTime()
+	if !modTime.After(createdTime) {
+		c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
+	}
+
+	// Call on directory (do not check ModTime as dirs don't need to support it)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, dirPath)
+	c.Assert(fi.Size(), check.Equals, int64(0))
+	c.Assert(fi.IsDir(), check.Equals, true)
+}
+
+// TestPutContentMultipleTimes checks that the storage driver can overwrite
+// content on subsequent puts. It validates that PutContent does not have to
+// work with an offset like WriteStream does: it overwrites the file entirely
+// rather than writing the data to the [0,len(data)) range of the file.
+func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(4096)
+
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	contents = randomContents(2048) // upload a different, smaller file
+	err = suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+	c.Assert(err, check.IsNil)
+
+	readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contents)
+}
+
+// TestConcurrentStreamReads checks that multiple clients can safely read from
+// the same file simultaneously with various offsets.
+func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + c.Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset := rand.Int63n(int64(len(contents))) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) + c.Assert(err, check.IsNil) + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents[offset:]) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to WriteStream concurrently without hanging. +func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { + numStreams := 32 + + if testing.Short() { + numStreams = 8 + c.Log("Reducing number of streams to 8 for short mode") + } + + var wg sync.WaitGroup + + testStream := func(size int64) { + defer wg.Done() + suite.testFileStreams(c, size) + } + + wg.Add(numStreams) + for i := numStreams; i > 0; i-- { + go testStream(int64(numStreams) * 1024 * 1024) + } + + wg.Wait() +} + +// TestEventualConsistency checks that if stat says that a file is a certain size, then +// you can freely read from the file (this is the only guarantee that the driver needs to provide) +func (suite *DriverSuite) TestEventualConsistency(c *check.C) { + if testing.Short() { + c.Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + var offset int64 + var misswrites int + var chunkSize int64 = 32 + + for i := 0; i < 1024; i++ { + contents := randomContents(chunkSize) + read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + + fi, err := suite.StorageDriver.Stat(suite.ctx, filename) + c.Assert(err, check.IsNil) + + // We are most concerned with being able to read data as soon as Stat declares + // it is uploaded. This is the strongest guarantee that some drivers (that guarantee + // at best eventual consistency) absolutely need to provide. 
+		if fi.Size() == offset+chunkSize {
+			reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
+			c.Assert(err, check.IsNil)
+
+			readContents, err := ioutil.ReadAll(reader)
+			c.Assert(err, check.IsNil)
+
+			c.Assert(readContents, check.DeepEquals, contents)
+
+			reader.Close()
+			offset += read
+		} else {
+			misswrites++
+		}
+	}
+
+	if misswrites > 0 {
+		c.Logf("There were %d occurrences of a write not being instantly available.", misswrites)
+	}
+
+	c.Assert(misswrites, check.Not(check.Equals), 1024)
+}
+
+// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files
+func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 0)
+}
+
+// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files
+func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024)
+}
+
+// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files
+func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024)
+}
+
+// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files
+func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024*1024)
+}
+
+func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
+	c.SetBytes(size)
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
+	}()
+
+	for i := 0; i < c.N; i++ {
+		filename := path.Join(parentDir, randomPath(32))
+		err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size))
+		c.Assert(err, check.IsNil)
+
+		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
+		c.Assert(err, check.IsNil)
+	}
+}
+
+// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files
+func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 0)
+}
+
+// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files
+func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024)
+}
+
+// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files
+func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024*1024)
+}
+
+// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files
+func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024*1024*1024)
+}
+
+func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
+	c.SetBytes(size)
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
+	}()
+
+	for i := 0; i < c.N; i++ {
+		filename := path.Join(parentDir, randomPath(32))
+		written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size)))
+		c.Assert(err, check.IsNil)
+		c.Assert(written, check.Equals, size)
+
+		rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
+		c.Assert(err, check.IsNil)
+		rc.Close()
+	}
+}
+
+// BenchmarkList5Files benchmarks List for 5 small files
+func (suite *DriverSuite) BenchmarkList5Files(c *check.C) {
+	suite.benchmarkListFiles(c, 5)
+}
+
+// BenchmarkList50Files benchmarks List for 50 small files
+func (suite *DriverSuite) BenchmarkList50Files(c *check.C) {
+	suite.benchmarkListFiles(c, 50)
+}
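+// The Benchmark* methods above rely on gocheck's benchmark support; with
+// gopkg.in/check.v1 they are typically invoked via `go test -check.b`
+// (a sketch; confirm the flag against the check package in this checkout).
+// benchmarkListFiles below populates numFiles empty files under a random
+// parent directory, then times repeated List calls over that directory.
+func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64)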
{ + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(suite.ctx, parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + 
path += chunk
+		remaining := length - int64(len(path))
+		if remaining == 1 {
+			path += randomFilename(1)
+		} else if remaining > 1 {
+			path += "/"
+		}
+	}
+	return path
+}
+
+func randomFilename(length int64) string {
+	b := make([]byte, length)
+	wasSeparator := true
+	for i := range b {
+		if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 {
+			b[i] = separatorChars[rand.Intn(len(separatorChars))]
+			wasSeparator = true
+		} else {
+			b[i] = filenameChars[rand.Intn(len(filenameChars))]
+			wasSeparator = false
+		}
+	}
+	return string(b)
+}
+
+// randomBytes pre-allocates all of the memory sizes needed for the test. If
+// anything panics while accessing randomBytes, just make this number bigger.
+var randomBytes = make([]byte, 128<<20)
+
+func init() {
+	// fill the pre-allocated buffer with random data; tests slice into it
+	// via randomContents
+	for i := range randomBytes {
+		randomBytes[i] = byte(rand.Intn(1 << 8))
+	}
+}
+
+func randomContents(length int64) []byte {
+	return randomBytes[:length]
+}
+
+type randReader struct {
+	r int64
+	m sync.Mutex
+}
+
+func (rr *randReader) Read(p []byte) (n int, err error) {
+	rr.m.Lock()
+	defer rr.m.Unlock()
+
+	n = copy(p, randomContents(int64(len(p))))
+	rr.r -= int64(n)
+
+	if rr.r <= 0 {
+		err = io.EOF
+	}
+
+	return
+}
+
+func newRandReader(n int64) *randReader {
+	return &randReader{r: n}
+}
+
+// firstPart returns the first path component of filePath, e.g.
+// "/a/b/c" -> "/a", "a/b" -> "/a", "" -> "/".
+func firstPart(filePath string) string {
+	if filePath == "" {
+		return "/"
+	}
+	for {
+		if filePath[len(filePath)-1] == '/' {
+			filePath = filePath[:len(filePath)-1]
+		}
+
+		dir, file := path.Split(filePath)
+		if dir == "" && file == "" {
+			return "/"
+		}
+		if dir == "/" || dir == "" {
+			return "/" + file
+		}
+		if file == "" {
+			return dir
+		}
+		filePath = dir
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go
new file mode 100644
index 00000000..b3a5f520
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go
@@ -0,0 +1,177 @@
+package storage
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// TODO(stevvooe): Set an optimal buffer size here. We'll have to
+// understand the latency characteristics of the underlying network to
+// set this correctly, so we may want to leave it to the driver. For
+// out of process drivers, we'll have to optimize this buffer size for
+// local communication.
+const fileReaderBufferSize = 4 << 20
+
+// fileReader provides a read seeker interface to files stored in a
+// storagedriver. It is used to implement part of the layer interface and
+// the read side of LayerUpload.
+type fileReader struct {
+	driver storagedriver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+	size int64 // size is the total size, must be set.
+
+	// mutable fields
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64         // offset is the current read offset
+	err    error         // terminal error, if set, reader is closed
+}
+
+// newFileReader initializes a file reader for the remote file. The reader
+// takes on the size and path that must be determined externally with a stat
+// call. The reader operates optimistically, assuming that the file is already
+// there.
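+//
+// For illustration, a minimal usage sketch (ctx, driver and path are assumed
+// to exist; the size would come from a prior Stat call):
+//
+//	fi, _ := driver.Stat(ctx, path)
+//	fr, _ := newFileReader(ctx, driver, path, fi.Size())
+//	defer fr.Close()
+//	fr.Seek(16, os.SEEK_SET) // position within the remote file
+//	buf := make([]byte, 32)
+//	n, _ := fr.Read(buf) // lazily opens a ReadStream at offset 16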
+func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
+	return &fileReader{
+		ctx:    ctx,
+		driver: driver,
+		path:   path,
+		size:   size,
+	}, nil
+}
+
+func (fr *fileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate io.EOF error if we reach filesize.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fr.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
+		fr.offset = newOffset
+	}
+
+	return fr.offset, err
+}
+
+func (fr *fileReader) Close() error {
+	return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
+}
+
+// reader prepares the current reader at the fileReader's offset, ensuring it
+// is buffered and ready to go.
+func (fr *fileReader) reader() (io.Reader, error) {
+	if fr.err != nil {
+		return nil, fr.err
+	}
+
+	if fr.rc != nil {
+		return fr.brd, nil
+	}
+
+	// If we don't have a reader, open one up.
+	rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset)
+	if err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): If the path is not found, we simply return a
+			// reader that returns io.EOF. However, we do not set fr.rc,
+			// allowing future attempts at getting a reader to possibly
+			// succeed if the file turns up later.
+			return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+		default:
+			return nil, err
+		}
+	}
+
+	fr.rc = rc
+
+	if fr.brd == nil {
+		fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
+	} else {
+		fr.brd.Reset(fr.rc)
+	}
+
+	return fr.brd, nil
+}
+
+// reset discards the current reader, forcing the read method to open up a new
+// connection and rebuild the buffered reader. This should be called when the
+// offset and the reader will become out of sync, such as during a seek
+// operation.
+func (fr *fileReader) reset() {
+	if fr.err != nil {
+		return
+	}
+	if fr.rc != nil {
+		fr.rc.Close()
+		fr.rc = nil
+	}
+}
+
+func (fr *fileReader) closeWithErr(err error) error {
+	if fr.err != nil {
+		return fr.err
+	}
+
+	fr.err = err
+
+	// close and release reader chain
+	if fr.rc != nil {
+		fr.rc.Close()
+	}
+
+	fr.rc = nil
+	fr.brd = nil
+
+	return fr.err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go
new file mode 100644
index 00000000..529fa673
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go
@@ -0,0 +1,180 @@
+package storage
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+const (
+	fileWriterBufferSize = 5 << 20
+)
+
+// fileWriter implements a remote file writer backed by a storage driver.
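+//
+// For illustration, a hedged sketch of typical use (ctx, driver and path are
+// assumed to exist; newFileWriter below returns a *bufferedFileWriter):
+//
+//	fw, _ := newFileWriter(ctx, driver, path)
+//	fw.Write(p) // small writes coalesce in the 5MB buffer
+//	fw.Close()  // flushes the buffer, then marks the writer closed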
+type fileWriter struct {
+	driver storagedriver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+
+	// mutable fields
+	size   int64 // size of the file, aka the current end
+	offset int64 // offset is the current write offset
+	err    error // terminal error, if set, writer is closed
+}
+
+type bufferedFileWriter struct {
+	fileWriter
+	bw *bufio.Writer
+}
+
+// fileWriterInterface makes the desired io compliant interface that the
+// filewriter should implement.
+type fileWriterInterface interface {
+	io.WriteSeeker
+	io.ReaderFrom
+	io.Closer
+}
+
+var _ fileWriterInterface = &fileWriter{}
+
+// newFileWriter returns a prepared fileWriter for the driver and path. This
+// could be considered similar to an "open" call on a regular filesystem.
+func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
+	fw := fileWriter{
+		driver: driver,
+		path:   path,
+		ctx:    ctx,
+	}
+
+	if fi, err := driver.Stat(ctx, path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// ignore, offset is zero
+		default:
+			return nil, err
+		}
+	} else {
+		if fi.IsDir() {
+			return nil, fmt.Errorf("cannot write to a directory")
+		}
+
+		fw.size = fi.Size()
+	}
+
+	buffered := bufferedFileWriter{
+		fileWriter: fw,
+	}
+	buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize)
+
+	return &buffered, nil
+}
+
+// Write wraps the fileWriter.Write method to buffer small writes.
+func (bfw *bufferedFileWriter) Write(p []byte) (int, error) {
+	return bfw.bw.Write(p)
+}
+
+// Close wraps fileWriter.Close to ensure the buffer is flushed
+// before we close the writer.
+func (bfw *bufferedFileWriter) Close() (err error) {
+	if err = bfw.Flush(); err != nil {
+		return err
+	}
+	err = bfw.fileWriter.Close()
+	return err
+}
+
+// Seek wraps fileWriter.Seek to ensure the offset is handled
+// correctly with respect to pending data in the buffer.
+func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) {
+	if err := bfw.Flush(); err != nil {
+		return 0, err
+	}
+	return bfw.fileWriter.Seek(offset, whence)
+}
+
+// Flush wraps bufio.Writer.Flush to allow intermediate flushes
+// of the bufferedFileWriter.
+func (bfw *bufferedFileWriter) Flush() error {
+	return bfw.bw.Flush()
+}
+
+// Write writes the buffer p at the current write offset.
+func (fw *fileWriter) Write(p []byte) (n int, err error) {
+	nn, err := fw.ReadFrom(bytes.NewReader(p))
+	return int(nn), err
+}
+
+// ReadFrom reads from reader r until io.EOF, writing the contents at the
+// current offset.
+func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	if fw.err != nil {
+		return 0, fw.err
+	}
+
+	nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r)
+
+	// We should forward the offset, whether or not there was an error.
+	// Basically, we keep the filewriter in sync with the reader's head. If an
+	// error is encountered, the whole thing should be retried but we proceed
+	// from an expected offset, even if the data didn't make it to the
+	// backend.
+	fw.offset += nn
+
+	if fw.offset > fw.size {
+		fw.size = fw.offset
+	}
+
+	return nn, err
+}
+
+// Seek moves the write position to the requested offset based on the whence
+// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET.
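+//
+// For example (illustrative): appending to the end of the remote file,
+// whatever its current size:
+//
+//	fw.Seek(0, os.SEEK_END) // write offset becomes fw.size
+//	fw.Write(tail)          // lands at the old end via driver.WriteStream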
+func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) {
+	if fw.err != nil {
+		return 0, fw.err
+	}
+
+	var err error
+	newOffset := fw.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fw.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		// No problems, set the offset.
+		fw.offset = newOffset
+	}
+
+	return fw.offset, err
+}
+
+// Close closes the fileWriter for writing. The first call returns a nil
+// error; subsequent calls detect that fw.err has been set and return that
+// error.
+func (fw *fileWriter) Close() error {
+	if fw.err != nil {
+		return fw.err
+	}
+
+	fw.err = fmt.Errorf("filewriter@%v: closed", fw.path)
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go
new file mode 100644
index 00000000..a1f8724d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go
@@ -0,0 +1,415 @@
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/uuid"
+)
+
+// linkPathFunc describes a function that can resolve a link based on the
+// repository name and digest.
+type linkPathFunc func(name string, dgst digest.Digest) (string, error)
+
+// linkedBlobStore provides a full BlobService that namespaces the blobs to a
+// given repository. Effectively, it manages the links in a given repository
+// that grant access to the global blob store.
+type linkedBlobStore struct {
+	*blobStore
+	registry               *registry
+	blobServer             distribution.BlobServer
+	blobAccessController   distribution.BlobDescriptorService
+	repository             distribution.Repository
+	ctx                    context.Context // only to be used where context can't come through method args
+	deleteEnabled          bool
+	resumableDigestEnabled bool
+
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
+}
+
+var _ distribution.BlobStore = &linkedBlobStore{}
+
+func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	return lbs.blobAccessController.Stat(ctx, dgst)
+}
+
+func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return nil, err
+	}
+
+	return lbs.blobStore.Get(ctx, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return nil, err
+	}
+
+	return lbs.blobStore.Open(ctx, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return err
+	}
+
+	if canonical.MediaType != "" {
+		// Set the repository local content type.
+		w.Header().Set("Content-Type", canonical.MediaType)
+	}
+
+	return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	dgst := digest.FromBytes(p)
+	// Place the data in the blob store first.
+	desc, err := lbs.blobStore.Put(ctx, mediaType, p)
+	if err != nil {
+		context.GetLogger(ctx).Errorf("error putting into main store: %v", err)
+		return distribution.Descriptor{}, err
+	}
+
+	if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	// TODO(stevvooe): Write out mediatype if incoming differs from what is
+	// returned by Put above. Note that we should allow updates for a given
+	// repository.
+
+	return desc, lbs.linkBlob(ctx, desc)
+}
+
+// createOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type createOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+	}
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*createOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
+
+// Create begins a blob write session, returning a handle.
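+//
+// A hedged usage sketch of the cross-repository mount path (construction of
+// the canonical reference is assumed):
+//
+//	bw, err := lbs.Create(ctx, WithMountFrom(ref))
+//	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
+//		_ = ebm.Descriptor // blob linked from ref's repository; no upload needed
+//	} else if err == nil {
+//		// normal upload session; write to bw, then commit it
+//	}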
+func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From.Name(), opts.Mount.From.Digest()) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err := repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + +// newBlobUpload allocates a new upload controller with the given state. 
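+// Create and Resume above both funnel into it: Create generates a fresh
+// upload id and writes the startedat file before calling it, while Resume
+// reconstructs startedAt by reading that file back for an existing id.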
+func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) {
+	fw, err := newFileWriter(ctx, lbs.driver, path)
+	if err != nil {
+		return nil, err
+	}
+
+	bw := &blobWriter{
+		blobStore:              lbs,
+		id:                     uuid,
+		startedAt:              startedAt,
+		digester:               digest.Canonical.New(),
+		bufferedFileWriter:     *fw,
+		resumableDigestEnabled: lbs.resumableDigestEnabled,
+	}
+
+	return bw, nil
+}
+
+// linkBlob links a valid, written blob into the registry under the named
+// repository for the upload controller.
+func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error {
+	dgsts := append([]digest.Digest{canonical.Digest}, aliases...)
+
+	// TODO(stevvooe): Need to write out mediatype for only canonical hash
+	// since we don't care about the aliases. They are generally unused except
+	// for tarsum but those versions don't care about mediatype.
+
+	// Don't make duplicate links.
+	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
+
+	// only use the first link
+	linkPathFn := lbs.linkPathFns[0]
+
+	for _, dgst := range dgsts {
+		if _, seen := seenDigests[dgst]; seen {
+			continue
+		}
+		seenDigests[dgst] = struct{}{}
+
+		blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
+		if err != nil {
+			return err
+		}
+
+		if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type linkedBlobStatter struct {
+	*blobStore
+	repository distribution.Repository
+
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
+}
+
+var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
+
+func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	var (
+		resolveErr error
+		target     digest.Digest
+	)
+
+	// try the many link path functions until we get success or an error that
+	// is not PathNotFoundError.
+	for _, linkPathFn := range lbs.linkPathFns {
+		var err error
+		target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
+
+		if err == nil {
+			resolveErr = nil // success; clear any error saved from an earlier miss
+			break
+		}
+
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error
+		default:
+			return distribution.Descriptor{}, err
+		}
+	}
+
+	if resolveErr != nil {
+		return distribution.Descriptor{}, resolveErr
+	}
+
+	if target != dgst {
+		// Track when we are doing cross-digest domain lookups, i.e. sha512 to sha256.
+		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
+	}
+
+	// TODO(stevvooe): Look up repository local mediatype and replace that on
+	// the returned descriptor.
+
+	return lbs.blobStore.statter.Stat(ctx, target)
+}
+
+func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
+	// clear any possible existence of a link described in linkPathFns
+	for _, linkPathFn := range lbs.linkPathFns {
+		blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
+		if err != nil {
+			return err
+		}
+
+		err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
+		if err != nil {
+			switch err := err.(type) {
+			case driver.PathNotFoundError:
+				continue // just ignore this error and continue
+			default:
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// resolveWithLinkFunc allows us to read a link to a resource with different
+// linkPathFuncs to let us try a few different paths before returning not
+// found.
+func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
+	blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
+	if err != nil {
+		return "", err
+	}
+
+	return lbs.blobStore.readlink(ctx, blobLinkPath)
+}
+
+func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	// The canonical descriptor for a blob is set at the commit phase of upload
+	return nil
+}
+
+// blobLinkPath provides the path to the blob link, also known as layers.
+func blobLinkPath(name string, dgst digest.Digest) (string, error) {
+	return pathFor(layerLinkPathSpec{name: name, digest: dgst})
+}
+
+// manifestRevisionLinkPath provides the path to the manifest revision link.
+func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
+	return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go
new file mode 100644
index 00000000..42027d13
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go
@@ -0,0 +1,96 @@
+package storage
+
+import (
+	"fmt"
+
+	"encoding/json"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/manifestlist"
+)
+
+// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
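+//
+// Schematically, the payload it deserializes looks like (abbreviated,
+// illustrative sketch; see the manifestlist package for the exact format):
+//
+//	{
+//	  "schemaVersion": 2,
+//	  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+//	  "manifests": [
+//	    {"mediaType": "...", "digest": "sha256:...", "platform": {"architecture": "amd64", "os": "linux"}}
+//	  ]
+//	}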
+type manifestListHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go new file mode 100644 index 00000000..31daa83c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go @@ -0,0 +1,134 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" +) + +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. 
+	Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest.
+	Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
+}
+
+// SkipLayerVerification allows a manifest to be Put before its
+// layers are on the filesystem.
+func SkipLayerVerification() distribution.ManifestServiceOption {
+	return skipLayerOption{}
+}
+
+type skipLayerOption struct{}
+
+func (o skipLayerOption) Apply(m distribution.ManifestService) error {
+	if ms, ok := m.(*manifestStore); ok {
+		ms.skipDependencyVerification = true
+		return nil
+	}
+	return fmt.Errorf("skip layer verification only valid for manifestStore")
+}
+
+type manifestStore struct {
+	repository *repository
+	blobStore  *linkedBlobStore
+	ctx        context.Context
+
+	skipDependencyVerification bool
+
+	schema1Handler      ManifestHandler
+	schema2Handler      ManifestHandler
+	manifestListHandler ManifestHandler
+}
+
+var _ distribution.ManifestService = &manifestStore{}
+
+func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
+
+	_, err := ms.blobStore.Stat(ms.ctx, dgst)
+	if err != nil {
+		if err == distribution.ErrBlobUnknown {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	return true, nil
+}
+
+func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
+
+	// TODO(stevvooe): Need to check descriptor from above to ensure that the
+	// mediatype is as we expect for the manifest store.
+
+	content, err := ms.blobStore.Get(ctx, dgst)
+	if err != nil {
+		if err == distribution.ErrBlobUnknown {
+			return nil, distribution.ErrManifestUnknownRevision{
+				Name:     ms.repository.Name(),
+				Revision: dgst,
+			}
+		}
+
+		return nil, err
+	}
+
+	var versioned manifest.Versioned
+	if err = json.Unmarshal(content, &versioned); err != nil {
+		return nil, err
+	}
+
+	switch versioned.SchemaVersion {
+	case 1:
+		return ms.schema1Handler.Unmarshal(ctx, dgst, content)
+	case 2:
+		// This can be an image manifest or a manifest list
+		switch versioned.MediaType {
+		case schema2.MediaTypeManifest:
+			return ms.schema2Handler.Unmarshal(ctx, dgst, content)
+		case manifestlist.MediaTypeManifestList:
+			return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
+		default:
+			return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
+		}
+	}
+
+	return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
+}
+
+func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
+
+	switch manifest.(type) {
+	case *schema1.SignedManifest:
+		return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification)
+	case *schema2.DeserializedManifest:
+		return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
+	case *manifestlist.DeserializedManifestList:
+		return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
+	}
+
+	return "", fmt.Errorf("unrecognized manifest type %T", manifest)
+}
+
+// Delete removes the revision of the specified manifest.
+func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
+	return ms.blobStore.Delete(ctx, dgst)
+}
+
+func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+	return 0, distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go
new file mode 100644
index 00000000..4d2d48c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go
@@ -0,0 +1,493 @@
+package storage
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+const (
+	storagePathVersion = "v2"                // fixed storage layout version
+	storagePathRoot    = "/docker/registry/" // all driver paths have a prefix
+
+	// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
+	// the storage path root would be configurable for all drivers through
+	// this package. In reality, we've found it simpler to do this on a per
+	// driver basis.
+)
+
+// pathFor maps paths based on "object names" and their ids. The "object
+// names" mapped are internal to the storage system.
+//
+// The path layout in the storage backend is roughly as follows:
+//
+//	<root>/v2
+//	-> repositories/
+//		-> <name>/
+//			-> _manifests/
+//				revisions
+//					-> <manifest digest path>
+//						-> link
+//					-> signatures
+//						<algorithm>/<digest>/link
+//				tags/<tag>
+//					-> current/link
+//					-> index
+//						-> <algorithm>/<hex digest>/link
+//			-> _layers/
+//				<layer links to blob store>
+//			-> _uploads/<id>
+//				data
+//				startedat
+//				hashstates/<algorithm>/<offset>
+//	-> blob/<algorithm>
+//		<split directory content addressable storage>
+//
+// The storage backend layout is broken up into a content-addressable blob
+// store and repositories. The content-addressable blob store holds most data
+// throughout the backend, keyed by algorithm and digests of the underlying
+// content. Access to the blob store is controlled through links from the
+// repository to blobstore.
+//
+// A repository is made up of layers, manifests and tags. The layers component
+// is just a directory of layers which are "linked" into a repository. A layer
+// can only be accessed through a qualified repository name if it is linked in
+// the repository. Uploads of layers are managed in the uploads directory,
+// which is keyed by upload id. When all data for an upload is received, the
+// data is moved into the blob store and the upload directory is deleted.
+// Abandoned uploads can be garbage collected by reading the startedat file
+// and removing uploads that have been active for longer than a certain time.
+//
+// The third component of the repository directory is the manifests store,
+// which is made up of a revision store and tag store. Manifests are stored in
+// the blob store and linked into the revision store. Signatures are separated
+// from the manifest payload data and linked into the blob store, as well.
+// While the registry can save all revisions of a manifest, no relationship is
+// implied as to the ordering of changes to a manifest. The tag store provides
+// support for name, tag lookups of manifests, using "current/link" under a
+// named tag directory. An index is maintained to support deletions of all
+// revisions of a given manifest tag.
+//
+// We cover the path formats implemented by this path mapper below.
+//
+// Manifests:
+//
+//	manifestRevisionPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
+//	manifestRevisionLinkPathSpec:  <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
+//	manifestSignaturesPathSpec:    <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
+//	manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<signature algorithm>/<signature digest>/link
+//
+// Tags:
+//
+//	manifestTagsPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/
+//	manifestTagPathSpec:               <root>/v2/repositories/<name>/_manifests/tags/<tag>/
+//	manifestTagCurrentPathSpec:        <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
+//	manifestTagIndexPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
+//	manifestTagIndexEntryPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
+//	manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
+//
+// Blobs:
+//
+//	layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
+//
+// Uploads:
+//
+//	uploadDataPathSpec:      <root>/v2/repositories/<name>/_uploads/<id>/data
+//	uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
+//	uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
+//
+// Blob Store:
+//
+//	blobPathSpec:          <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
+//	blobDataPathSpec:      <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
+//	blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
+//
+// For more information on the semantic meaning of each path and its
+// contents, please see the path spec documentation.
+func pathFor(spec pathSpec) (string, error) {
+
+	// Switch on the path object type and return the appropriate path. At
+	// first glance, one may wonder why we don't use an interface to
+	// accomplish this. By keeping the formatting separate from the pathSpec,
+	// we keep the path generation componentized. These specs could be
+	// passed to a completely different mapper implementation and generate a
+	// different set of paths.
+	//
+	// For example, imagine migrating from one backend to the other: one could
+	// build a filesystem walker that converts a string path in one version,
+	// to an intermediate path object, that can be consumed and mapped by the
+	// other version.
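+	//
+	// As a concrete, illustrative example of the mapping implemented below:
+	//
+	//	pathFor(manifestTagCurrentPathSpec{name: "library/ubuntu", tag: "latest"})
+	//
+	// yields
+	//
+	//	/docker/registry/v2/repositories/library/ubuntu/_manifests/tags/latest/current/link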
+ + rootPrefix := []string{storagePathRoot, storagePathVersion} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestSignaturesPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "signatures"), nil + case manifestSignatureLinkPathSpec: + root, err := pathFor(manifestSignaturesPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + signatureComponents, err := digestPathComponents(v.signature, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pathFor(manifestTagsPathSpec{ + name: v.name, + }) + + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pathFor(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pathFor(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. 
+ + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestSignaturesPathSpec describes the path components for the directory +// containing all the signatures for the target blob. Entries are named with +// the underlying key id. +type manifestSignaturesPathSpec struct { + name string + revision digest.Digest +} + +func (manifestSignaturesPathSpec) pathSpec() {} + +// manifestSignatureLinkPathSpec describes the path components used to look up +// a signature file by the hash of its blob. +type manifestSignatureLinkPathSpec struct { + name string + revision digest.Digest + signature digest.Digest +} + +func (manifestSignatureLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag link files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. +type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {}
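The pathSpec types above follow a common Go pattern: an unexported marker method keeps foreign types out of the type switch in pathFor while leaving the specs as plain data. The same pattern in a self-contained sketch with hypothetical names:

```go
package main

import "fmt"

// spec and tagSpec are stand-ins for pathSpec and the *PathSpec structs:
// the unexported marker method means only types in this package can
// satisfy the interface, so the switch below is exhaustive by construction.
type spec interface{ spec() }

type tagSpec struct{ name, tag string }

func (tagSpec) spec() {}

func render(s spec) (string, error) {
	switch v := s.(type) {
	case tagSpec:
		return "/repos/" + v.name + "/tags/" + v.tag, nil
	default:
		return "", fmt.Errorf("unknown path spec: %#v", v)
	}
}

func main() {
	p, _ := render(tagSpec{name: "library/ubuntu", tag: "latest"})
	fmt.Println(p) // /repos/library/ubuntu/tags/latest
}
```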
+// manifestTagIndexPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a +// manifest with the given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// layerLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// <algorithm>:<hex digest of layer data> +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256, that can be fetched from the blob store. +type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// // blobPathSpec contains the path for the registry global blob store. +// type blobPathSpec struct { +// digest digest.Digest +// } + +// func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadStartedAtPathSpec defines the path parameters for the file that +// stores the start time of an upload. If it is missing, the upload is +// considered unknown. Admittedly, the presence of this file is an ugly hack +// to make sure we have a way to clean up old or stalled uploads that doesn't +// rely on driver FileInfo behavior. If we come up with a more clever way to +// do this, we should remove this file immediately and rely on the startedAt +// field from the client to enforce time out policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec describes the root path of the repositories +// directory. +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {}
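To make blobDataPathSpec and the multilevel digest breakdown concrete, a small standalone sketch; blobDataPath is a hypothetical helper mirroring the documented layout, not this package's pathFor:

```go
package main

import (
	"fmt"
	"path"
)

// blobDataPath mirrors blobDataPathSpec with multilevel components: the
// first two hex characters of the digest add an extra directory level so
// no single directory has to hold every blob.
func blobDataPath(algorithm, hex string) string {
	return path.Join("/docker/registry", "v2", "blobs",
		algorithm, hex[:2], hex, "data")
}

func main() {
	fmt.Println(blobDataPath("sha256",
		"96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"))
	// /docker/registry/v2/blobs/sha256/96/96443a84.../data
}
```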
+// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// <algorithm>/<hex digest> +// +// If multilevel is true, the first two hex characters of the digest become an +// additional directory level. It will be as follows: +// +// <algorithm>/<first two hex characters of digest>/<hex digest> +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 00000000..7576b189 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,139 @@ +package storage + +import ( + "path" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// uploadData stores the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned. +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +}
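A hedged sketch of how PurgeUploads might be driven. It assumes the in-memory driver from the upstream tree; the one-week cutoff is arbitrary, and the actuallyDelete=false pass is a dry run:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	driver := inmemory.New()

	cutoff := time.Now().Add(-7 * 24 * time.Hour) // arbitrary: one week

	// Dry run: report which upload directories would be removed.
	candidates, errs := storage.PurgeUploads(ctx, driver, cutoff, false)
	fmt.Println("would delete:", candidates, "errors:", errs)

	// Real pass: delete upload directories started before the cutoff.
	deleted, errs := storage.PurgeUploads(ctx, driver, cutoff, true)
	fmt.Println("deleted:", deleted, "errors:", errs)
}
```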
+// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedat +// file, so gather files by UUID with a date from startedat. +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData) + + inUploadDir := false + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path. If the UUID is +// the last path component, the path is the containing directory for all +// upload files. +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedat file +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // TODO(richardscothern): pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go new file mode 100644 index 00000000..869895dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go @@ -0,0 +1,249 @@ +package storage + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool +} + +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry.
It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, + } + + bs := &blobStore{ + driver: driver, + statter: statter, + } + + registry := ®istry{ + blobStore: bs, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + statter: statter, + resumableDigestEnabled: true, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. +func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { + if _, err := reference.ParseNamed(canonicalName); err != nil { + return nil, distribution.ErrRepositoryNameInvalid{ + Name: canonicalName, + Reason: err, + } + } + + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) + if err != nil { + return nil, err + } + } + + return &repository{ + ctx: ctx, + registry: reg, + name: canonicalName, + descriptorCache: descriptorCache, + }, nil +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name string + descriptorCache distribution.BlobDescriptorService +} + +// Name returns the name of the repository. 
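The functional options above compose at construction time; a hedged sketch of wiring them together, assuming the in-memory driver and the in-memory blob descriptor cache provider from the upstream tree (not necessarily vendored here):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()

	// Options are applied left to right; each one mutates the registry
	// being built and may fail independently.
	reg, err := storage.NewRegistry(ctx, inmemory.New(),
		storage.EnableDelete,
		storage.EnableRedirect,
		storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(reg.Scope()) // the global scope: this registry serves all repositories
}
```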
+func (repo *repository) Name() string { + return repo.name +} + +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + } + + ms := &manifestStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + signatures: &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: repo.blobStore, + }, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + } + + // Apply options + for _, option := range options { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + + return ms, nil +} + +// Blobs returns an instance of the BlobStore. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + return &linkedBlobStore{ + registry: repo.registry, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. 
+ linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 00000000..115786e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,99 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. +func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + target := mnfst.Target() + _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) + } + + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go new file mode 100644 index 00000000..ede4e0e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go @@ -0,0 +1,131 @@ +package storage + +import ( + "path" + "sync" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +type signatureStore struct { + repository *repository + blobStore *blobStore + ctx context.Context +} + +func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { + signaturesPath, err := pathFor(manifestSignaturesPathSpec{ + name: s.repository.Name(), + revision: dgst, + }) + + if err != nil { + return nil, err + } + + // Need to append signature digest algorithm to path to get all items. + // Perhaps this should be in the pathMapper but it feels awkward. This + // can be eliminated by implementing listAll on drivers. + signaturesPath = path.Join(signaturesPath, "sha256") + + signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) + if err != nil { + return nil, err + } + + var wg sync.WaitGroup + type result struct { + index int + signature []byte + err error + } + ch := make(chan result) + + bs := s.linkedBlobStore(s.ctx, dgst) + for i, sigPath := range signaturePaths { + sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) + if err != nil { + context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) + continue + } + + wg.Add(1) + go func(idx int, sigdgst digest.Digest) { + defer wg.Done() + context.GetLogger(s.ctx). + Debugf("fetching signature %q", sigdgst) + + r := result{index: idx} + + if p, err := bs.Get(s.ctx, sigdgst); err != nil { + context.GetLogger(s.ctx). + Errorf("error fetching signature %q: %v", sigdgst, err) + r.err = err + } else { + r.signature = p + } + + ch <- r + }(i, sigdgst) + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + // aggregate the results + signatures := make([][]byte, len(signaturePaths)) +loop: + for { + select { + case result := <-ch: + signatures[result.index] = result.signature + if result.err != nil && err == nil { + // only set the first one. + err = result.err + } + case <-done: + break loop + } + } + + return signatures, err +} + +func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { + bs := s.linkedBlobStore(s.ctx, dgst) + for _, signature := range signatures { + if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { + return err + } + } + return nil +} + +// linkedBlobStore returns the namedBlobStore of the signatures for the +// manifest with the given digest. Effectively, each signature link path +// layout is a unique linked blob store.
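The Get method above fans out one goroutine per signature and aggregates through an unbuffered channel; because each send completes before its deferred wg.Done() runs, no result can still be in flight when the done channel closes. The same shape in a self-contained, generic sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func fetchAll(items []string, fetch func(string) (string, error)) ([]string, error) {
	type result struct {
		index int
		value string
		err   error
	}

	var wg sync.WaitGroup
	ch := make(chan result)
	for i, item := range items {
		wg.Add(1)
		go func(idx int, item string) {
			defer wg.Done() // runs only after the send below completes
			v, err := fetch(item)
			ch <- result{index: idx, value: v, err: err}
		}(i, item)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	out := make([]string, len(items))
	var firstErr error
loop:
	for {
		select {
		case r := <-ch:
			out[r.index] = r.value
			if r.err != nil && firstErr == nil {
				firstErr = r.err // keep only the first error, as Get does
			}
		case <-done:
			break loop
		}
	}
	return out, firstErr
}

func main() {
	vals, err := fetchAll([]string{"a", "b", "c"}, func(s string) (string, error) {
		return s + "!", nil
	})
	fmt.Println(vals, err) // [a! b! c!] <nil>
}
```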
+func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { + linkpath := func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestSignatureLinkPathSpec{ + name: name, + revision: revision, + signature: dgst, + }) + + } + + return &linkedBlobStore{ + ctx: ctx, + repository: s.repository, + blobStore: s.blobStore, + blobAccessController: &linkedBlobStatter{ + blobStore: s.blobStore, + repository: s.repository, + linkPathFns: []linkPathFunc{linkpath}, + }, + linkPathFns: []linkPathFunc{linkpath}, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 00000000..02663226 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,150 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + signatures *signatureStore +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + // Fetch the signatures for the manifest + signatures, err := ms.signatures.Get(dgst) + if err != nil { + return nil, err + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + // Grab each json signature and store them. + signatures, err := sm.Signatures() + if err != nil { + return "", err + } + + if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. 
It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +}
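The name checks above mirror what the reference package enforces; a small standalone probe of the same rules through reference.ParseNamed, the anchored parser that registry.Repository also uses (the sample names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	for _, name := range []string{"library/ubuntu", "Library/Ubuntu"} {
		if _, err := reference.ParseNamed(name); err != nil {
			// Uppercase components are rejected by the name grammar.
			fmt.Printf("%s: %v\n", name, err)
			continue
		}
		fmt.Printf("%s: ok\n", name)
	}
}
```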
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go new file mode 100644 index 00000000..df6e8dfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go @@ -0,0 +1,191 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var _ distribution.TagService = &tagStore{} + +// tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments, +// which only make use of the Digest field of the returned distribution.Descriptor +// but do not enable full round-tripping of Descriptor objects. +type tagStore struct { + repository *repository + blobStore *blobStore +} + +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Name(), + }) + if err != nil { + return tags, err + } + + entries, err := ts.blobStore.driver.List(ctx, pathSpec) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} + default: + return tags, err + } + } + + for _, entry := range entries { + _, filename := path.Split(entry) + tags = append(tags, filename) + } + + return tags, nil +} + +// exists returns true if the specified manifest tag exists in the repository. +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { + tagPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), + tag: tag, + }) + + if err != nil { + return false, err + } + + exists, err := exists(ctx, ts.blobStore.driver, tagPath) + if err != nil { + return false, err + } + + return exists, nil +} + +// Tag tags the digest with the given tag, updating the store so the tag +// points at the given digest. The digest must point to a manifest. +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), + tag: tag, + }) + + if err != nil { + return err + } + + lbs := ts.linkedBlobStore(ctx, tag) + + // Link into the index + if err := lbs.linkBlob(ctx, desc); err != nil { + return err + } + + // Overwrite the current link + return ts.blobStore.link(ctx, currentPath, desc.Digest) +} + +// Get resolves the current revision for the given name and tag. +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), + tag: tag, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + revision, err := ts.blobStore.readlink(ctx, currentPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + + return distribution.Descriptor{}, err + } + + return distribution.Descriptor{Digest: revision}, nil +} + +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { + tagPath, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Name(), + tag: tag, + }) + + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: + return err + } + + return ts.blobStore.driver.Delete(ctx, tagPath) +}
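A hedged usage sketch of the TagService surface implemented above; repo stands for any distribution.Repository and desc for a manifest descriptor already linked into it (a compiling fragment, not a full program):

```go
package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// tagDance exercises the tagStore methods above: Tag writes current/link and
// an index entry, Get follows current/link back to a digest, Lookup maps a
// digest back to its tags, and Untag removes the tag directory.
func tagDance(ctx context.Context, repo distribution.Repository, desc distribution.Descriptor) error {
	tags := repo.Tags(ctx)

	if err := tags.Tag(ctx, "latest", desc); err != nil {
		return err
	}

	got, err := tags.Get(ctx, "latest")
	if err != nil {
		return err
	}
	fmt.Println(got.Digest == desc.Digest) // true

	names, err := tags.Lookup(ctx, desc)
	if err != nil {
		return err
	}
	fmt.Println(names) // [latest]

	return tags.Untag(ctx, "latest")
}
```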
+// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + + }}, + } +} + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. +func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + if err != nil { + return nil, err + } + + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go new file mode 100644 index 00000000..773d7ba0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go new file mode 100644 index 00000000..60d5a2fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go @@ -0,0 +1,65 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// vacuum contains functions for cleaning up repositories and blobs. +// These functions will only reliably work on strongly consistent +// storage systems. +// https://en.wikipedia.org/wiki/Consistency_model
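A short sketch of driving the vacuum defined below; the in-memory driver is for illustration only, and on an empty store both calls simply report path-not-found errors:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	v := storage.NewVacuum(ctx, inmemory.New())

	// Each call resolves a storage path and issues one recursive Delete
	// against the driver; see the consistency caveat above.
	if err := v.RemoveRepository("library/ubuntu"); err != nil {
		fmt.Println(err)
	}
	if err := v.RemoveBlob("sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"); err != nil {
		fmt.Println(err)
	}
}
```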
+ +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.ParseDigest(dgst) + if err != nil { + return err + } + + blobPath, err := pathFor(blobDataPathSpec{digest: d}) + if err != nil { + return err + } + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go new file mode 100644 index 00000000..d979796e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go @@ -0,0 +1,59 @@ +package storage + +import ( + "errors" + "fmt" + "sort" + + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// ErrSkipDir is used as a return value from a WalkFn to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk. +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. Otherwise Walk will return the error +// and stop the traversal. +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file. +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck.
+ fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + if err := Walk(ctx, driver, child, f); err != nil { + return err + } + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/tags.go new file mode 100644 index 00000000..50305659 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. + Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go new file mode 100644 index 00000000..00cd8a6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go @@ -0,0 +1,148 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" +) + +// RequestResponseMap is an ordered mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines a Response to be sent in response to a given +// Request +type RequestResponseMapping struct { + Request Request + Response Response +} + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // QueryParams are the query parameters of this request + QueryParams map[string][]string + + // Body is the byte contents of the http request + Body []byte + + // Headers are the header for this request + Headers http.Header +} + +func (r Request) String() string { + queryString := "" + if len(r.QueryParams) > 0 { + keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } + } + queryString = "?" 
+ strings.Join(queryParts, "&") + } + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // Statuscode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping := range requestResponseMap { + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, + Headers: make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { + request.Headers[k] = v + } + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go new file mode 100644 index 00000000..2c1d2d82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go @@ -0,0 +1,78 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "fmt" + "io" + mrand "math/rand" + "time" + + "github.com/docker/distribution/digest" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its digest. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. 
+ header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := rand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + dgst = digest.FromBytes(target.Bytes()) + + return bytes.NewReader(target.Bytes()), dgst, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 00000000..d433ccaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? 
+ panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/print.go b/Godeps/_workspace/src/github.com/docker/distribution/version/print.go new file mode 100644 index 00000000..a82bce39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/print.go @@ -0,0 +1,26 @@ +package version + +import ( + "fmt" + "io" + "os" +) + +// FprintVersion outputs the version string to the writer, in the following +// format, followed by a newline: +// +// <cmd> <project> <version> +// +// For example, a binary "registry" built from github.com/docker/distribution +// with version "v2.0" would print the following: +// +// registry github.com/docker/distribution v2.0 +// +func FprintVersion(w io.Writer) { + fmt.Fprintln(w, os.Args[0], Package, Version) +} + +// PrintVersion outputs the version information, from FprintVersion, to stdout. +func PrintVersion() { + FprintVersion(os.Stdout) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go new file mode 100644 index 00000000..450d15c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go @@ -0,0 +1,11 @@ +package version + +// Package is the overall, canonical project import path under which the +// package was built. +var Package = "github.com/docker/distribution" + +// Version indicates which version of the binary is running. This is set to +// the latest release tag by hand, always suffixed by "+unknown". During +// build, it will be replaced by the actual version. The value here will be +// used if the registry is run after a go get based install. +var Version = "v2.1.0+unknown" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh new file mode 100644 index 00000000..53e29ce9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This shell script outputs the current, desired content of version.go, using +# git describe. For best effect, pipe this to the target file. Generally, this +# only needs to be updated for releases.
The actual value of will be replaced +# during build time if the makefile is used. + +set -e + +cat <" instruction. + addTrustedFlags(cmd, true) + + cmd.ParseFlags(args, true) + + var ( + context io.ReadCloser + err error + ) + + specifiedContext := cmd.Arg(0) + + var ( + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + progBuff = cli.out + buildBuff = cli.out + if *suppressOutput { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) + case urlutil.IsURL(specifiedContext): + context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName) + default: + contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName) + } + + if err != nil { + if *suppressOutput && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(cli.err, progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + if context == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := validateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + var resolvedTags []*resolvedTag + if isTrusted() { + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. 
+ context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags) + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + + var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if *flMemoryString != "" { + parsedMemory, err := units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if *flShmSize != "" { + shmSize, err = units.RAMInBytes(*flShmSize) + if err != nil { + return err + } + } + + options := types.ImageBuildOptions{ + Context: body, + Memory: memory, + MemorySwap: memorySwap, + Tags: flTags.GetAll(), + SuppressOutput: *suppressOutput, + NoCache: *noCache, + Remove: *rm, + ForceRemove: *forceRm, + PullParent: *pull, + IsolationLevel: container.IsolationLevel(*isolation), + CPUSetCPUs: *flCPUSetCpus, + CPUSetMems: *flCPUSetMems, + CPUShares: *flCPUShares, + CPUQuota: *flCPUQuota, + CPUPeriod: *flCPUPeriod, + CgroupParent: *flCgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: flUlimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), + AuthConfigs: cli.configFile.AuthConfigs, + } + + response, err := cli.client.ImageBuild(options) + if err != nil { + return err + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if *suppressOutput { + fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff) + } + return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" { + fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if *suppressOutput { + fmt.Fprintf(cli.out, "%s", buildBuff) + } + + if isTrusted() { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. 
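// For illustration: every size flag handled above (--memory, --memory-swap,
// --shm-size) funnels through units.RAMInBytes from the vendored
// docker/go-units package, which accepts human-readable binary suffixes. A
// minimal standalone sketch; the sample values are illustrative:
package main

import (
    "fmt"

    "github.com/docker/go-units"
)

func main() {
    for _, s := range []string{"512m", "2g", "300"} {
        n, err := units.RAMInBytes(s) // e.g. "512m" -> 536870912
        if err != nil {
            fmt.Printf("bad size %q: %v\n", s, err)
            continue
        }
        fmt.Printf("%s = %d bytes\n", s, n)
    }
}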
+ for _, resolved := range resolvedTags { + if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +// validateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read; +// symlinks which point to non-existing files don't trigger an error +func validateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hang on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +// isUNC returns true if the path is UNC (one starting with \\). It always +// returns false on non-Windows platforms. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} + +// getDockerfileRelPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory, the relative path of +// the dockerfile in that context directory, and a non-nil error on failure. +func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = filepath.Abs(givenContextDir); err != nil { + return "", "", fmt.Errorf("unable to get absolute context directory: %v", err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory.
+ absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) + } + + if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { + return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) + } + + if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) + } + + return absContextDir, relDockerfile, nil +} + +// writeToFile copies from the given reader and writes it to a file with the +// given filename. +func writeToFile(r io.Reader, filename string) error { + file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to create file: %v", err) + } + defer file.Close() + + if _, err := io.Copy(file, r); err != nil { + return fmt.Errorf("unable to write file: %v", err) + } + + return nil +} + +// getContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func getContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + buf := bufio.NewReader(r) + + magic, err := buf.Peek(archive.HeaderSize) + if err != nil && err != io.EOF { + return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + + if archive.IsArchive(magic) { + return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil + } + + // Input should be read as a Dockerfile. 
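// For illustration: the ".."+separator prefix test in getDockerfileRelPath
// above is the entire guard that keeps the Dockerfile inside the build
// context. The same check in isolation, as a standalone sketch (paths are
// illustrative):
package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// insideContext reports whether target stays under root once made relative to it.
func insideContext(root, target string) (bool, error) {
    rel, err := filepath.Rel(root, target)
    if err != nil {
        return false, err
    }
    return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)), nil
}

func main() {
    ok, _ := insideContext("/ctx", "/ctx/app/Dockerfile") // true
    esc, _ := insideContext("/ctx", "/etc/Dockerfile")    // false: escapes the context
    fmt.Println(ok, esc)
}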
+ tmpDir, err := ioutil.TempDir("", "docker-build-context-") + if err != nil { + return nil, "", fmt.Errorf("unable to create temporary context directory: %v", err) + } + + f, err := os.Create(filepath.Join(tmpDir, api.DefaultDockerfileName)) + if err != nil { + return nil, "", err + } + _, err = io.Copy(f, buf) + if err != nil { + f.Close() + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + if err := r.Close(); err != nil { + return nil, "", err + } + + tar, err := archive.Tar(tmpDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(tmpDir) + return err + }), api.DefaultDockerfileName, nil + +} + +// getContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a non-nil error on +// failure. +func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } + if absContextDir, err = gitutils.Clone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// getContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return getContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// getContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a non-nil +// error on failure. +func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, a Dockerfile specified with the + // `-f/--file` option is considered relative to the current directory and + // not the context directory. + if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite.
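// For illustration: getContextFromReader above distinguishes "STDIN is a tar
// archive" from "STDIN is a plain Dockerfile" by peeking at the stream,
// leaving the bytes buffered for the real read. A stdlib-only sketch of the
// same trick; 262 mirrors archive.HeaderSize, and "ustar" is the tar magic at
// offset 257:
package main

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "strings"
)

func sniffTar(r io.Reader) (*bufio.Reader, bool) {
    br := bufio.NewReader(r)
    head, err := br.Peek(262)
    if err != nil && err != io.EOF {
        return br, false
    }
    isTar := len(head) >= 262 && bytes.Equal(head[257:262], []byte("ustar"))
    return br, isTar // br still yields the stream from its first byte
}

func main() {
    _, isTar := sniffTar(strings.NewReader("FROM busybox\n"))
    fmt.Println(isTar) // false: treat the input as a Dockerfile
}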
+type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in +// "FROM <image>" instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + ref, err := reference.ParseNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.WithDefaultTag(ref) + if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { + trustedRef, err := translator(ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive.
+ var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go new file mode 100644 index 00000000..2ad3afab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go @@ -0,0 +1,173 @@ +package client + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. + in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut indicates whether the client's STDOUT is a TTY + isTerminalOut bool + // client is the http client that performs all API operations + client client.APIClient +} + +// Initialize calls the init function that will setup the configuration for the client +// such as the TLS, tcp and other parameters used to run the client. +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +// PsFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// ImagesFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. 
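// For illustration: replaceDockerfileTarWrapper above re-tars the build
// context on the fly through io.Pipe, swapping one entry's payload and fixing
// up hdr.Size so header and body stay consistent. A trimmed standalone sketch
// of that shape; the upper-casing rewrite is purely illustrative, not part of
// the vendored code:
package main

import (
    "archive/tar"
    "bytes"
    "io"
    "io/ioutil"
    "strings"
)

func filterTar(in io.Reader, name string, rewrite func([]byte) []byte) io.ReadCloser {
    pr, pw := io.Pipe()
    go func() {
        tr, tw := tar.NewReader(in), tar.NewWriter(pw)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                tw.Close()
                pw.Close()
                return
            }
            if err != nil {
                pw.CloseWithError(err)
                return
            }
            var body io.Reader = tr
            if hdr.Name == name {
                b, err := ioutil.ReadAll(tr)
                if err != nil {
                    pw.CloseWithError(err)
                    return
                }
                b = rewrite(b)
                hdr.Size = int64(len(b)) // resize the header to match the new payload
                body = bytes.NewReader(b)
            }
            if err := tw.WriteHeader(hdr); err != nil {
                pw.CloseWithError(err)
                return
            }
            if _, err := io.Copy(tw, body); err != nil {
                pw.CloseWithError(err)
                return
            }
        }
    }()
    return pr
}

func main() {
    var src bytes.Buffer
    tw := tar.NewWriter(&src)
    tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: 12})
    tw.Write([]byte("FROM busybox"))
    tw.Close()
    out := filterTar(&src, "Dockerfile", func(b []byte) []byte {
        return []byte(strings.ToUpper(string(b)))
    })
    io.Copy(ioutil.Discard, out) // drive the pipe to completion
}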
+func (cli *DockerCli) ImagesFormat() string { + return cli.configFile.ImagesFormat +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). +func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + clientFlags.PostParse() + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + cli.configFile = configFile + + host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + customHeaders := cli.configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" + + verStr := api.DefaultVersion.String() + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + clientTransport, err := newClientTransport(clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + client, err := client.NewClient(host, verStr, clientTransport, customHeaders) + if err != nil { + return err + } + cli.client = client + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + return nil + } + + return cli +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + defaultHost := opts.DefaultTCPHost + if tlsOptions != nil { + defaultHost = opts.DefaultTLSHost + } + + host, err = opts.ParseHost(defaultHost, host) + return +} + +func newClientTransport(tlsOptions *tlsconfig.Options) (*http.Transport, error) { + if tlsOptions == nil { + return &http.Transport{}, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + return &http.Transport{ + TLSClientConfig: config, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. 
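// For illustration: getServerHost above encodes a small precedence rule: an
// explicit -H wins, otherwise DOCKER_HOST, otherwise a scheme-dependent
// default. A standalone sketch of just that rule; the default addresses shown
// are illustrative, not the vendored opts constants:
package main

import (
    "errors"
    "fmt"
    "os"
)

func resolveHost(flagHosts []string, tls bool) (string, error) {
    var host string
    switch len(flagHosts) {
    case 0:
        host = os.Getenv("DOCKER_HOST")
    case 1:
        host = flagHosts[0]
    default:
        return "", errors.New("please specify only one -H")
    }
    if host == "" {
        if tls {
            return "tcp://0.0.0.0:2376", nil // illustrative TLS default
        }
        return "tcp://0.0.0.0:2375", nil // illustrative plaintext default
    }
    return host, nil
}

func main() {
    h, _ := resolveHost(nil, false) // DOCKER_HOST if set, else the default
    fmt.Println(h)
}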
+package client diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go new file mode 100644 index 00000000..4c0d5c28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go @@ -0,0 +1,83 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// CmdCommit creates a new image from a container's changes. +// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith \")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repositoryAndTag = cmd.Arg(1) + repositoryName string + tag string + ) + + //Check if the given image name can be resolved + if repositoryAndTag != "" { + ref, err := reference.ParseNamed(repositoryAndTag) + if err != nil { + return err + } + + repositoryName = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot commit to digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + } + + var config *container.Config + if *flConfig != "" { + config = &container.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + + options := types.ContainerCommitOptions{ + ContainerID: name, + RepositoryName: repositoryName, + Tag: tag, + Comment: *flComment, + Author: *flAuthor, + Changes: flChanges.GetAll(), + Pause: *flPause, + Config: config, + } + + response, err := cli.client.ContainerCommit(options) + if err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go new file mode 100644 index 00000000..43573d90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go @@ -0,0 +1,296 @@ +package client + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// CmdCp copies files/folders to or from a path in a container. 
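// For illustration: CmdCommit above pulls the repository and tag apart with a
// type switch over the parsed reference, rejecting digest references outright.
// The same logic in isolation, as a sketch that assumes the vendored
// docker/docker/reference package; the image name in main is illustrative:
package main

import (
    "errors"
    "fmt"

    "github.com/docker/docker/reference"
)

func splitRepoTag(s string) (repo, tag string, err error) {
    ref, err := reference.ParseNamed(s)
    if err != nil {
        return "", "", err
    }
    switch x := ref.(type) {
    case reference.Canonical:
        // committing to name@sha256:... would pin an immutable digest
        return "", "", errors.New("cannot commit to digest reference")
    case reference.NamedTagged:
        tag = x.Tag()
    }
    return ref.Name(), tag, nil
}

func main() {
    repo, tag, err := splitRepoTag("myorg/myimage:v1")
    fmt.Println(repo, tag, err) // myorg/myimage v1 <nil>
}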
+// +// When copying from a container, if DEST_PATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if SRC_PATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:DEST_PATH must specify +// a directory. +// +// Usage: +// docker cp CONTAINER:SRC_PATH DEST_PATH|- +// docker cp SRC_PATH|- CONTAINER:DEST_PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"}, + strings.Join([]string{ + Cli.DockerCommands["cp"].Description, + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbolic links in SRC_PATH") + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source cannot be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination cannot be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: *followLink, + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if the container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately followed by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`.
+ return "", arg + } + + return parts[0], parts[1] +} + +func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { + return cli.client.ContainerStatPath(containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if cpParam.followLink { + srcStat, err := cli.statContainerPath(srcContainer, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := cli.client.CopyFromContainer(srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := cli.statContainerPath(dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = cli.statContainerPath(dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. 
If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + ContainerID: dstContainer, + Path: resolvedDstPath, + Content: content, + AllowOverwriteDirWithFile: false, + } + + return cli.client.CopyToContainer(options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go new file mode 100644 index 00000000..3cf8a7a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go @@ -0,0 +1,177 @@ +package client + +import ( + "fmt" + "io" + "os" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + var tag string + switch x := reference.WithDefaultTag(ref).(type) { + case reference.Canonical: + tag = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + // Resolve the Auth config relevant for this server + encodedAuth, err := cli.encodeRegistryAuth(repoInfo.Index) + if err != nil { + return err + } + + options := 
types.ImageCreateOptions{ + Parent: ref.Name(), + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImageCreate(options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + ref, err := reference.ParseNamed(config.Image) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + var trustedRef reference.Canonical + + if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { + var err error + trustedRef, err = cli.trustedReference(ref) + if err != nil { + return nil, err + } + config.Image = trustedRef.String() + } + + //create the container + response, err := cli.client.ContainerCreate(config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if client.IsErrImageNotFound(err) { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { + if err := cli.tagTrusted(trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = cli.client.ContainerCreate(config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
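// For illustration: createContainer above is a "try, pull on image-not-found,
// retry once" loop around the engine API. The control flow in isolation,
// abstracted over hypothetical create/pull callbacks (none of these names are
// in the vendored code):
package main

import (
    "errors"
    "fmt"
)

var errImageNotFound = errors.New("image not found") // stand-in for client.IsErrImageNotFound

func createWithPull(create, pull func() error) error {
    err := create()
    if err != errImageNotFound {
        return err // nil, or a failure pulling can't fix
    }
    if err := pull(); err != nil {
        return err
    }
    return create() // a single retry after a successful pull
}

func main() {
    calls := 0
    err := createWithPull(
        func() error {
            calls++
            if calls == 1 {
                return errImageNotFound
            }
            return nil
        },
        func() error { return nil },
    )
    fmt.Println(err, calls) // <nil> 2
}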
+func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go new file mode 100644 index 00000000..f180e001 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go @@ -0,0 +1,47 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). +// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + changes, err := cli.client.ContainerDiff(cmd.Arg(0)) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go new file mode 100644 index 00000000..c2a6ab3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go @@ -0,0 +1,108 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdEvents prints a live stream of real time events from the server. 
+// +// Usage: docker events [OPTIONS] +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) + since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + eventFilterArgs := filters.NewArgs() + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } + + options := types.EventsOptions{ + Since: *since, + Until: *until, + Filters: eventFilterArgs, + } + + responseBody, err := cli.client.Events(options) + if err != nil { + return err + } + defer responseBody.Close() + + return streamEvents(responseBody, cli.out) +} + +// streamEvents decodes and prints the incoming events to the provided output. +func streamEvents(input io.Reader, output io.Writer) error { + return decodeEvents(input, func(event eventtypes.Message, err error) error { + if err != nil { + return err + } + printOutput(event, output) + return nil + }) +} + +type eventProcessor func(event eventtypes.Message, err error) error + +func decodeEvents(input io.Reader, ep eventProcessor) error { + dec := json.NewDecoder(input) + for { + var event eventtypes.Message + err := dec.Decode(&event) + if err != nil && err == io.EOF { + break + } + + if procErr := ep(event, err); procErr != nil { + return procErr + } + } + return nil +} + +// printOutput prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func printOutput(event eventtypes.Message, output io.Writer) { + if event.TimeNano != 0 { + fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + for k, v := range event.Actor.Attributes { + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(output, "\n") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go new file mode 100644 index 00000000..0ce9e81f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go @@ -0,0 +1,158 @@ +package client + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/engine-api/types" +) + +// CmdExec runs a command in a running container. +// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
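// For illustration: decodeEvents above drains a stream of back-to-back JSON
// objects with a single json.Decoder until io.EOF. A standalone sketch of
// that loop over a simplified event shape; the struct is illustrative, not
// the engine-api type:
package main

import (
    "encoding/json"
    "fmt"
    "io"
    "strings"
)

type event struct {
    Type   string `json:"Type"`
    Action string `json:"Action"`
}

func main() {
    stream := strings.NewReader(
        `{"Type":"container","Action":"start"}{"Type":"container","Action":"die"}`)
    dec := json.NewDecoder(stream)
    for {
        var e event
        if err := dec.Decode(&e); err == io.EOF {
            break // clean end of stream
        } else if err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(e.Type, e.Action)
    }
}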
+func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + execConfig, err := ParseExec(cmd, args) + // just in case the ParseExec does not exit + if execConfig.Container == "" || err != nil { + return Cli.StatusError{StatusCode: 1} + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = cli.configFile.DetachKeys + + response, err := cli.client.ContainerExecCreate(*execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := cli.client.ContainerExecStart(execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + resp, err := cli.client.ContainerExecAttach(execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} + +// ParseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +// If the minimal number of specified args is not right or if specified args are +// not valid, it will return an error. 
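// For illustration: CmdExec above parks the blocking stream copy in a
// goroutine and waits on an error channel (promise.Go is essentially that
// pattern). A stdlib-only sketch; the reader standing in for the hijacked
// connection is illustrative:
package main

import (
    "fmt"
    "io"
    "os"
    "strings"
)

// goErr runs f concurrently and delivers its error on the returned channel.
func goErr(f func() error) <-chan error {
    ch := make(chan error, 1)
    go func() { ch <- f() }()
    return ch
}

func main() {
    conn := strings.NewReader("hello from the exec session\n")
    errCh := goErr(func() error {
        _, err := io.Copy(os.Stdout, conn)
        return err
    })
    if err := <-errCh; err != nil {
        fmt.Fprintln(os.Stderr, "stream error:", err)
    }
}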
+func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string + ) + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return nil, err + } + container = cmd.Arg(0) + parsedArgs := cmd.Args() + execCmd = parsedArgs[1:] + + execConfig := &types.ExecConfig{ + User: *flUser, + Privileged: *flPrivileged, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go new file mode 100644 index 00000000..ec94f672 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go @@ -0,0 +1,45 @@ +package client + +import ( + "errors" + "io" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. +// +// The tar archive is streamed to STDOUT by default or written to a file. +// +// Usage: docker export [OPTIONS] CONTAINER +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + var ( + output = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ContainerExport(cmd.Arg(0)) + if err != nil { + return err + } + defer responseBody.Close() + + _, err = io.Copy(output, responseBody) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go new file mode 100644 index 00000000..8a680705 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go @@ -0,0 +1,222 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" +) + +const ( + tableKey = "table" + + containerIDHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +type containerContext struct { + baseSubContext + trunc bool + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + 
c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +type imageContext struct { + baseSubContext + trunc bool + i types.Image + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.addHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.addHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.addHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.addHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.addHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.addHeader(sizeHeader) + return units.HumanSize(float64(c.i.Size)) +} + +type subContext interface { + fullHeader() string + addHeader(header string) +} + +type baseSubContext struct { + header []string +} + +func (c *baseSubContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *baseSubContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go new file mode 100644 index 00000000..749148ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go @@ -0,0 +1,254 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultQuietFormat = "{{.ID}}" +) + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format string + // Quiet when set to true will simply print minimal information. + Quiet bool + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+	Trunc bool
+
+	// internal element
+	table       bool
+	finalFormat string
+	header      string
+	buffer      *bytes.Buffer
+}
+
+func (c *Context) preformat() {
+	c.finalFormat = c.Format
+
+	if strings.HasPrefix(c.Format, tableKey) {
+		c.table = true
+		c.finalFormat = c.finalFormat[len(tableKey):]
+	}
+
+	c.finalFormat = strings.Trim(c.finalFormat, " ")
+	r := strings.NewReplacer(`\t`, "\t", `\n`, "\n")
+	c.finalFormat = r.Replace(c.finalFormat)
+}
+
+func (c *Context) parseFormat() (*template.Template, error) {
+	tmpl, err := template.New("").Parse(c.finalFormat)
+	if err != nil {
+		c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err))
+		c.buffer.WriteTo(c.Output)
+	}
+	return tmpl, err
+}
+
+func (c *Context) postformat(tmpl *template.Template, subContext subContext) {
+	if c.table {
+		if len(c.header) == 0 {
+			// if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template
+			tmpl.Execute(bytes.NewBufferString(""), subContext)
+			c.header = subContext.fullHeader()
+		}
+
+		t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0)
+		t.Write([]byte(c.header))
+		t.Write([]byte("\n"))
+		c.buffer.WriteTo(t)
+		t.Flush()
+	} else {
+		c.buffer.WriteTo(c.Output)
+	}
+}
+
+func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error {
+	if err := tmpl.Execute(c.buffer, subContext); err != nil {
+		c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err))
+		c.buffer.WriteTo(c.Output)
+		return err
+	}
+	if c.table && len(c.header) == 0 {
+		c.header = subContext.fullHeader()
+	}
+	c.buffer.WriteString("\n")
+	return nil
+}
+
+// ContainerContext contains container-specific information required by the formatter; it encapsulates a Context struct.
+type ContainerContext struct {
+	Context
+	// Size when set to true will display the size of the output.
+	Size bool
+	// Containers
+	Containers []types.Container
+}
+
+// ImageContext contains image-specific information required by the formatter; it encapsulates a Context struct.
+type ImageContext struct {
+	Context
+	Digest bool
+	// Images
+	Images []types.Image
+}
+
+func (ctx ContainerContext) Write() {
+	switch ctx.Format {
+	case tableFormatKey:
+		ctx.Format = defaultContainerTableFormat
+		if ctx.Quiet {
+			ctx.Format = defaultQuietFormat
+		}
+	case rawFormatKey:
+		if ctx.Quiet {
+			ctx.Format = `container_id: {{.ID}}`
+		} else {
+			ctx.Format = `container_id: {{.ID}}
+image: {{.Image}}
+command: {{.Command}}
+created_at: {{.CreatedAt}}
+status: {{.Status}}
+names: {{.Names}}
+labels: {{.Labels}}
+ports: {{.Ports}}
+`
+			if ctx.Size {
+				ctx.Format += `size: {{.Size}}
+`
+			}
+		}
+	}
+
+	ctx.buffer = bytes.NewBufferString("")
+	ctx.preformat()
+	if ctx.table && ctx.Size {
+		ctx.finalFormat += "\t{{.Size}}"
+	}
+
+	tmpl, err := ctx.parseFormat()
+	if err != nil {
+		return
+	}
+
+	for _, container := range ctx.Containers {
+		containerCtx := &containerContext{
+			trunc: ctx.Trunc,
+			c:     container,
+		}
+		err = ctx.contextFormat(tmpl, containerCtx)
+		if err != nil {
+			return
+		}
+	}
+
+	ctx.postformat(tmpl, &containerContext{})
+}
+
+func (ctx ImageContext) Write() {
+	switch ctx.Format {
+	case tableFormatKey:
+		ctx.Format = defaultImageTableFormat
+		if ctx.Digest {
+			ctx.Format = defaultImageTableFormatWithDigest
+		}
+		if ctx.Quiet {
+			ctx.Format = defaultQuietFormat
+		}
+	case rawFormatKey:
+		if ctx.Quiet {
+			ctx.Format = `image_id: {{.ID}}`
+		} else {
+			if ctx.Digest {
+				ctx.Format = `repository: {{ .Repository }}
+tag: {{.Tag}}
+digest: {{.Digest}}
+image_id: {{.ID}}
+created_at: {{.CreatedAt}}
+virtual_size: {{.Size}}
+`
+			} else {
+				ctx.Format = `repository: {{ .Repository }}
+tag: {{.Tag}}
+image_id: {{.ID}}
+created_at: {{.CreatedAt}}
+virtual_size: {{.Size}}
+`
+			}
+		}
+	}
+
+	ctx.buffer = bytes.NewBufferString("")
+	ctx.preformat()
+	if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") {
+		ctx.finalFormat += "\t{{.Digest}}"
+	}
+
+	tmpl, err := ctx.parseFormat()
+	if err != nil {
+		return
+	}
+
+	for _, image := range ctx.Images {
+
+		repoTags := image.RepoTags
+		repoDigests := image.RepoDigests
+
+		if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
+			// dangling image - clear out either repoTags or repoDigests so we only show it once below
+			repoDigests = []string{}
+		}
+		// combine the tags and digests lists
+		tagsAndDigests := append(repoTags, repoDigests...)
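+		// Editor's note (illustrative, not part of the vendored file): for an
+		// image that is both tagged and pulled by digest, the combined list
+		// might look like
+		//
+		//	repoTags    = []string{"busybox:latest"}
+		//	repoDigests = []string{"busybox@sha256:..."}
+		//
+		// Each entry is split below into repository, tag, and digest via
+		// reference.ParseNamed; dangling images carry the literal "<none>"
+		// sentinels handled above.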
+		for _, repoAndRef := range tagsAndDigests {
+			repo := "<none>"
+			tag := "<none>"
+			digest := "<none>"
+
+			if !strings.HasPrefix(repoAndRef, "<none>") {
+				ref, err := reference.ParseNamed(repoAndRef)
+				if err != nil {
+					continue
+				}
+				repo = ref.Name()
+
+				switch x := ref.(type) {
+				case reference.Canonical:
+					digest = x.Digest().String()
+				case reference.NamedTagged:
+					tag = x.Tag()
+				}
+			}
+			imageCtx := &imageContext{
+				trunc:  ctx.Trunc,
+				i:      image,
+				repo:   repo,
+				tag:    tag,
+				digest: digest,
+			}
+			err = ctx.contextFormat(tmpl, imageCtx)
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	ctx.postformat(tmpl, &imageContext{})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go
new file mode 100644
index 00000000..ea4b5e38
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go
@@ -0,0 +1,78 @@
+package client
+
+import (
+	"io"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/engine-api/types"
+)
+
+func (cli *DockerCli) holdHijackedConnection(setRawTerminal bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {
+	var (
+		err      error
+		oldState *term.State
+	)
+	if inputStream != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" {
+		oldState, err = term.SetRawTerminal(cli.inFd)
+		if err != nil {
+			return err
+		}
+		defer term.RestoreTerminal(cli.inFd, oldState)
+	}
+
+	receiveStdout := make(chan error, 1)
+	if outputStream != nil || errorStream != nil {
+		go func() {
+			defer func() {
+				if inputStream != nil {
+					if setRawTerminal && cli.isTerminalIn {
+						term.RestoreTerminal(cli.inFd, oldState)
+					}
+					inputStream.Close()
+				}
+			}()
+
+			// When TTY is ON, use regular copy
+			if setRawTerminal && outputStream != nil {
+				_, err = io.Copy(outputStream, resp.Reader)
+			} else {
+				_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
+			}
+			logrus.Debugf("[hijack] End of stdout")
+			receiveStdout <- err
+		}()
+	}
+
+	stdinDone := make(chan struct{})
+	go func() {
+		if inputStream != nil {
+			io.Copy(resp.Conn, inputStream)
+			logrus.Debugf("[hijack] End of stdin")
+		}
+
+		if err := resp.CloseWrite(); err != nil {
+			logrus.Debugf("Couldn't send EOF: %s", err)
+		}
+		close(stdinDone)
+	}()
+
+	select {
+	case err := <-receiveStdout:
+		if err != nil {
+			logrus.Debugf("Error receiveStdout: %s", err)
+			return err
+		}
+	case <-stdinDone:
+		if outputStream != nil || errorStream != nil {
+			if err := <-receiveStdout; err != nil {
+				logrus.Debugf("Error receiveStdout: %s", err)
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go
new file mode 100644
index 00000000..5409b74a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go
@@ -0,0 +1,74 @@
+package client
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	Cli "github.com/docker/docker/cli"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/go-units"
+)
+
+// CmdHistory shows the history of an image.
+//
+// Usage: docker history [OPTIONS] IMAGE
+func (cli *DockerCli) CmdHistory(args ...string) error {
+	cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
+	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
+	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	history, err := cli.client.ImageHistory(cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+
+	if *quiet {
+		for _, entry := range history {
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\n", entry.ID)
+			} else {
+				fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
+			}
+		}
+		w.Flush()
+		return nil
+	}
+
+	var imageID string
+	var createdBy string
+	var created string
+	var size string
+
+	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
+	for _, entry := range history {
+		imageID = entry.ID
+		createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1)
+		if !*noTrunc {
+			createdBy = stringutils.Truncate(createdBy, 45)
+			imageID = stringid.TruncateID(entry.ID)
+		}
+
+		if *human {
+			created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
+			size = units.HumanSize(float64(entry.Size))
+		} else {
+			created = time.Unix(entry.Created, 0).Format(time.RFC3339)
+			size = strconv.FormatInt(entry.Size, 10)
+		}
+
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
+	}
+	w.Flush()
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go
new file mode 100644
index 00000000..2b0b4045
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go
@@ -0,0 +1,79 @@
+package client
+
+import (
+	"github.com/docker/docker/api/client/formatter"
+	Cli "github.com/docker/docker/cli"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+)
+
+// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
+//
+// Usage: docker images [OPTIONS] [REPOSITORY]
+func (cli *DockerCli) CmdImages(args ...string) error {
+	cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
+	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
+	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
+	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
+	format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template")
+
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+	cmd.Require(flag.Max, 1)
+
+	cmd.ParseFlags(args, true)
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get processed in the daemon/server.
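+	// Editor's note (illustrative, not part of the vendored file): each
+	// --filter flag is a KEY=VALUE pair, e.g.
+	//
+	//	docker images --filter dangling=true
+	//
+	// and filters.ParseFlag folds every pair into the single filters.Args
+	// value sent along with the list request.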
+ imageFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + var matchName string + if cmd.NArg() == 1 { + matchName = cmd.Arg(0) + } + + options := types.ImageListOptions{ + MatchName: matchName, + All: *all, + Filters: imageFilterArgs, + } + + images, err := cli.client.ImageList(options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.ImagesFormat()) > 0 && !*quiet { + f = cli.ImagesFormat() + } else { + f = "table" + } + } + + imagesCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Digest: *showDigests, + Images: images, + } + + imagesCtx.Write() + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go new file mode 100644 index 00000000..5b812206 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go @@ -0,0 +1,80 @@ +package client + +import ( + "fmt" + "io" + "os" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. +// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + in io.Reader + tag string + src = cmd.Arg(0) + srcName = src + repository = cmd.Arg(1) + changes = flChanges.GetAll() + ) + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use file|URL|- [REPOSITORY[:TAG]]\n") + tag = cmd.Arg(2) + } + + if repository != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(repository); err != nil { + return err + } + } + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + srcName = "-" + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + } + + options := types.ImageImportOptions{ + Source: in, + SourceName: srcName, + RepositoryName: repository, + Message: *message, + Tag: tag, + Changes: changes, + } + + responseBody, err := cli.client.ImageImport(options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go new file mode 100644 index 00000000..42f06826 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go @@ -0,0 +1,144 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-units" +) + +// CmdInfo displays system-wide information. +// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + info, err := cli.client.Info() + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") + } + } + + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + + fmt.Fprintf(cli.out, "Plugins: \n") + fmt.Fprintf(cli.out, " Volume:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(cli.out, "\n") + fmt.Fprintf(cli.out, " Network:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(cli.out, "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(cli.out, " Authorization:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(cli.out, "\n") + } + + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + + if info.Debug { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) + fmt.Fprintf(cli.out, " Init SHA1: %s\n", info.InitSha1) + fmt.Fprintf(cli.out, " Init Path: %s\n", info.InitPath) + fmt.Fprintf(cli.out, " Docker Root Dir: %s\n", info.DockerRootDir) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(cli.err, "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(cli.err, "WARNING: No swap limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(cli.err, "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(cli.err, "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", 
attribute) + } + } + + ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go new file mode 100644 index 00000000..5401d3bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go @@ -0,0 +1,133 @@ +package client + +import ( + "encoding/json" + "fmt" + "text/template" + + "github.com/docker/docker/api/client/inspect" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/client" +) + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + var elementSearcher inspectSearcher + switch *inspectType { + case "container": + elementSearcher = cli.inspectContainers(*size) + case "image": + elementSearcher = cli.inspectImages(*size) + default: + elementSearcher = cli.inspectAll(*size) + } + + return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) +} + +func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ContainerInspectWithRaw(ref, getSize) + } +} + +func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ImageInspectWithRaw(ref, getSize) + } +} + +func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + c, rawContainer, err := cli.client.ContainerInspectWithRaw(ref, getSize) + if err != nil { + // Search for image with that id if a container doesn't exist. 
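+			// Editor's note (illustrative, not part of the vendored file): the
+			// fallback below only triggers on a "no such container" error, so any
+			// other failure (e.g. a transport error) surfaces immediately instead
+			// of being masked by an image lookup.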
+			if client.IsErrContainerNotFound(err) {
+				i, rawImage, err := cli.client.ImageInspectWithRaw(ref, getSize)
+				if err != nil {
+					if client.IsErrImageNotFound(err) {
+						return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref)
+					}
+					return nil, nil, err
+				}
+				return i, rawImage, err
+			}
+			return nil, nil, err
+		}
+		return c, rawContainer, err
+	}
+}
+
+type inspectSearcher func(ref string) (interface{}, []byte, error)
+
+func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error {
+	elementInspector, err := cli.newInspectorWithTemplate(tmplStr)
+	if err != nil {
+		return Cli.StatusError{StatusCode: 64, Status: err.Error()}
+	}
+
+	var inspectErr error
+	for _, ref := range references {
+		element, raw, err := searchByReference(ref)
+		if err != nil {
+			inspectErr = err
+			break
+		}
+
+		if err := elementInspector.Inspect(element, raw); err != nil {
+			inspectErr = err
+			break
+		}
+	}
+
+	if err := elementInspector.Flush(); err != nil {
+		cli.inspectErrorStatus(err)
+	}
+
+	if status := cli.inspectErrorStatus(inspectErr); status != 0 {
+		return Cli.StatusError{StatusCode: status}
+	}
+	return nil
+}
+
+func (cli *DockerCli) inspectErrorStatus(err error) (status int) {
+	if err != nil {
+		fmt.Fprintf(cli.err, "%s\n", err)
+		status = 1
+	}
+	return
+}
+
+func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) {
+	elementInspector := inspect.NewIndentedInspector(cli.out)
+	if tmplStr != "" {
+		tmpl, err := template.New("").Funcs(funcMap).Parse(tmplStr)
+		if err != nil {
+			return nil, fmt.Errorf("Template parsing error: %s", err)
+		}
+		elementInspector = inspect.NewTemplateInspector(cli.out, tmpl)
+	}
+	return elementInspector, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go
new file mode 100644
index 00000000..a1d16d47
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go
@@ -0,0 +1,119 @@
+package inspect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"text/template"
+)
+
+// Inspector defines an interface to implement to process elements
type Inspector interface {
+	Inspect(typedElement interface{}, rawElement []byte) error
+	Flush() error
+}
+
+// TemplateInspector uses a text template to inspect elements.
+type TemplateInspector struct {
+	outputStream io.Writer
+	buffer       *bytes.Buffer
+	tmpl         *template.Template
+}
+
+// NewTemplateInspector creates a new inspector with a template.
+func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector {
+	return &TemplateInspector{
+		outputStream: outputStream,
+		buffer:       new(bytes.Buffer),
+		tmpl:         tmpl,
+	}
+}
+
+// Inspect executes the inspect template.
+// It decodes the raw element into a map if the initial execution fails.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+	buffer := new(bytes.Buffer)
+	if err := i.tmpl.Execute(buffer, typedElement); err != nil {
+		if rawElement == nil {
+			return fmt.Errorf("Template parsing error: %v", err)
+		}
+		return i.tryRawInspectFallback(rawElement, err)
+	}
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
+
+// Flush writes the result of inspecting all elements into the output stream.
+func (i *TemplateInspector) Flush() error {
+	if i.buffer.Len() == 0 {
+		_, err := io.WriteString(i.outputStream, "\n")
+		return err
+	}
+	_, err := io.Copy(i.outputStream, i.buffer)
+	return err
+}
+
+// IndentedInspector uses a buffer to store the indented representation of an element.
+type IndentedInspector struct {
+	outputStream io.Writer
+	elements     []interface{}
+	rawElements  [][]byte
+}
+
+// NewIndentedInspector generates a new IndentedInspector.
+func NewIndentedInspector(outputStream io.Writer) Inspector {
+	return &IndentedInspector{
+		outputStream: outputStream,
+	}
+}
+
+// Inspect writes the raw element with an indented json format.
+func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+	if rawElement != nil {
+		i.rawElements = append(i.rawElements, rawElement)
+	} else {
+		i.elements = append(i.elements, typedElement)
+	}
+	return nil
+}
+
+// Flush writes the result of inspecting all elements into the output stream.
+func (i *IndentedInspector) Flush() error {
+	if len(i.elements) == 0 && len(i.rawElements) == 0 {
+		_, err := io.WriteString(i.outputStream, "[]\n")
+		return err
+	}
+
+	var buffer io.Reader
+	if len(i.rawElements) > 0 {
+		bytesBuffer := new(bytes.Buffer)
+		bytesBuffer.WriteString("[")
+		for idx, r := range i.rawElements {
+			bytesBuffer.Write(r)
+			if idx < len(i.rawElements)-1 {
+				bytesBuffer.WriteString(",")
+			}
+		}
+		bytesBuffer.WriteString("]")
+		indented := new(bytes.Buffer)
+		if err := json.Indent(indented, bytesBuffer.Bytes(), "", "    "); err != nil {
+			return err
+		}
+		buffer = indented
+	} else {
+		b, err := json.MarshalIndent(i.elements, "", "    ")
+		if err != nil {
+			return err
+		}
+		buffer = bytes.NewReader(b)
+	}
+
+	if _, err := io.Copy(i.outputStream, buffer); err != nil {
+		return err
+	}
+	_, err := io.WriteString(i.outputStream, "\n")
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go
new file mode 100644
index 00000000..39a0510c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go
@@ -0,0 +1,40 @@
+// +build !go1.5
+
+package inspect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// tryRawInspectFallback executes the inspect template with a raw interface.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface.
+// It doesn't allow to modify this behavior either, sending <no value> messages to the output.
+// We assume that the template is invalid when there is a <no value>, if the template was valid
+// we'd get <no value> or "" values. In that case we fail with the original error raised executing the
+// template with the typed input.
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error {
+	var raw interface{}
+	buffer := new(bytes.Buffer)
+	rdr := bytes.NewReader(rawElement)
+	dec := json.NewDecoder(rdr)
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		return fmt.Errorf("unable to read inspect data: %v", rawErr)
+	}
+
+	if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil {
+		return fmt.Errorf("Template parsing error: %v", rawErr)
+	}
+
+	if strings.Contains(buffer.String(), "<no value>") {
+		return fmt.Errorf("Template parsing error: %v", originalErr)
+	}
+
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go
new file mode 100644
index 00000000..b098f415
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go
@@ -0,0 +1,29 @@
+// +build go1.5
+
+package inspect
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error {
+	var raw interface{}
+	buffer := new(bytes.Buffer)
+	rdr := bytes.NewReader(rawElement)
+	dec := json.NewDecoder(rdr)
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		return fmt.Errorf("unable to read inspect data: %v", rawErr)
+	}
+
+	tmplMissingKey := i.tmpl.Option("missingkey=error")
+	if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil {
+		return fmt.Errorf("Template parsing error: %v", rawErr)
+	}
+
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go
new file mode 100644
index 00000000..5e8e5b07
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+	"fmt"
+	"strings"
+
+	Cli "github.com/docker/docker/cli"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdKill kills one or more running containers using SIGKILL or a specified signal.
+//
+// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdKill(args ...string) error {
+	cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true)
+	signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var errs []string
+	for _, name := range cmd.Args() {
+		if err := cli.client.ContainerKill(name, *signal); err != nil {
+			errs = append(errs, fmt.Sprintf("Failed to kill container (%s): %s", name, err))
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go
new file mode 100644
index 00000000..82cf34d8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+	"io"
+	"os"
+
+	Cli "github.com/docker/docker/cli"
+	"github.com/docker/docker/pkg/jsonmessage"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdLoad loads an image from a tar archive.
+//
+// The tar archive is read from STDIN by default, or from a tar archive file.
+// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + var input io.Reader = cli.in + if *infile != "" { + file, err := os.Open(*infile) + if err != nil { + return err + } + defer file.Close() + input = file + } + + response, err := cli.client.ImageLoad(input) + if err != nil { + return err + } + defer response.Body.Close() + + if response.JSON { + return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) + } + + _, err = io.Copy(cli.out, response.Body) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go new file mode 100644 index 00000000..a396450d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go @@ -0,0 +1,143 @@ +package client + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdLogin logs in or registers a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. +// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210
+	if runtime.GOOS == "windows" {
+		cli.in = os.Stdin
+	}
+
+	serverAddress := registry.IndexServer
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	promptDefault := func(prompt string, configDefault string) {
+		if configDefault == "" {
+			fmt.Fprintf(cli.out, "%s: ", prompt)
+		} else {
+			fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
+		}
+	}
+
+	readInput := func(in io.Reader, out io.Writer) string {
+		reader := bufio.NewReader(in)
+		line, _, err := reader.ReadLine()
+		if err != nil {
+			fmt.Fprintln(out, err.Error())
+			os.Exit(1)
+		}
+		return string(line)
+	}
+
+	authconfig, ok := cli.configFile.AuthConfigs[serverAddress]
+	if !ok {
+		authconfig = types.AuthConfig{}
+	}
+
+	if username == "" {
+		promptDefault("Username", authconfig.Username)
+		username = readInput(cli.in, cli.out)
+		username = strings.TrimSpace(username)
+		if username == "" {
+			username = authconfig.Username
+		}
+	}
+	// Assume that a different username means they may not want to use
+	// the password or email from the config file, so prompt them
+	if username != authconfig.Username {
+		if password == "" {
+			oldState, err := term.SaveState(cli.inFd)
+			if err != nil {
+				return err
+			}
+			fmt.Fprintf(cli.out, "Password: ")
+			term.DisableEcho(cli.inFd, oldState)
+
+			password = readInput(cli.in, cli.out)
+			fmt.Fprint(cli.out, "\n")
+
+			term.RestoreTerminal(cli.inFd, oldState)
+			if password == "" {
+				return fmt.Errorf("Error: Password Required")
+			}
+		}
+
+		if email == "" {
+			promptDefault("Email", authconfig.Email)
+			email = readInput(cli.in, cli.out)
+			if email == "" {
+				email = authconfig.Email
+			}
+		}
+	} else {
+		// However, if they don't override the username use the
+		// password or email from the cmd line if specified. IOW, allow
+		// them to change/override them. And if not specified, just
+		// use what's in the config file
+		if password == "" {
+			password = authconfig.Password
+		}
+		if email == "" {
+			email = authconfig.Email
+		}
+	}
+	authconfig.Username = username
+	authconfig.Password = password
+	authconfig.Email = email
+	authconfig.ServerAddress = serverAddress
+	cli.configFile.AuthConfigs[serverAddress] = authconfig
+
+	auth := cli.configFile.AuthConfigs[serverAddress]
+	response, err := cli.client.RegistryLogin(auth)
+	if err != nil {
+		if client.IsErrUnauthorized(err) {
+			delete(cli.configFile.AuthConfigs, serverAddress)
+			if err2 := cli.configFile.Save(); err2 != nil {
+				fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2)
+			}
+		}
+		return err
+	}
+
+	if err := cli.configFile.Save(); err != nil {
+		return fmt.Errorf("Error saving config file: %v", err)
+	}
+	fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename())
+
+	if response.Status != "" {
+		fmt.Fprintf(cli.out, "%s\n", response.Status)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go
new file mode 100644
index 00000000..3753cbbe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+	"fmt"
+
+	Cli "github.com/docker/docker/cli"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/registry"
+)
+
+// CmdLogout logs a user out from a Docker registry.
+//
+// If no server is specified, the user will be logged out from the registry's index server.
+// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + serverAddress := registry.IndexServer + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + return nil + } + + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.AuthConfigs, serverAddress) + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go new file mode 100644 index 00000000..92b75e05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go @@ -0,0 +1,63 @@ +package client + +import ( + "fmt" + "io" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +// CmdLogs fetches the logs of a given container. +// +// docker logs [OPTIONS] CONTAINER +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + + c, err := cli.client.ContainerInspect(name) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ContainerID: name, + ShowStdout: true, + ShowStderr: true, + Since: *since, + Timestamps: *times, + Follow: *follow, + Tail: *tail, + } + responseBody, err := cli.client.ContainerLogs(options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(cli.out, responseBody) + } else { + _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody) + } + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go new file mode 100644 index 00000000..56adabc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go @@ -0,0 +1,378 @@ +package client + +import ( + "fmt" + "net" + "strings" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" +) 
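+
+// Editor's note (illustrative, not part of the vendored file): a create
+// invocation such as
+//
+//	docker network create -d bridge --subnet 10.0.0.0/24 --gateway 10.0.0.1 mynet
+//
+// flows through consolidateIpam below to produce a single
+// network.IPAMConfig{Subnet: "10.0.0.0/24", Gateway: "10.0.0.1"} entry in the
+// NetworkCreate request body; "mynet" is a hypothetical network name.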
+
+// CmdNetwork is the parent subcommand for all network commands
+//
+// Usage: docker network <COMMAND> [OPTIONS]
+func (cli *DockerCli) CmdNetwork(args ...string) error {
+	cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false)
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	cmd.Usage()
+	return err
+}
+
+// CmdNetworkCreate creates a new network with a given name
+//
+// Usage: docker network create [OPTIONS] <NETWORK-NAME>
+func (cli *DockerCli) CmdNetworkCreate(args ...string) error {
+	cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false)
+	flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network")
+	flOpts := opts.NewMapOpts(nil, nil)
+
+	flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver")
+	flIpamSubnet := opts.NewListOpts(nil)
+	flIpamIPRange := opts.NewListOpts(nil)
+	flIpamGateway := opts.NewListOpts(nil)
+	flIpamAux := opts.NewMapOpts(nil, nil)
+	flIpamOpt := opts.NewMapOpts(nil, nil)
+
+	cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
+	cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
+	cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
+	cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver")
+	cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options")
+	cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options")
+
+	flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network")
+
+	cmd.Require(flag.Exact, 1)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	// Set the default driver to "" if the user didn't set the value.
+	// That way we can know whether it was user input or not.
+	driver := *flDriver
+	if !cmd.IsSet("-driver") && !cmd.IsSet("d") {
+		driver = ""
+	}
+
+	ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll())
+	if err != nil {
+		return err
+	}
+
+	// Construct network create request body
+	nc := types.NetworkCreate{
+		Name:           cmd.Arg(0),
+		Driver:         driver,
+		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()},
+		Options:        flOpts.GetAll(),
+		CheckDuplicate: true,
+		Internal:       *flInternal,
+	}
+
+	resp, err := cli.client.NetworkCreate(nc)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.out, "%s\n", resp.ID)
+	return nil
+}
+
+// CmdNetworkRm deletes one or more networks
+//
+// Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...]
+func (cli *DockerCli) CmdNetworkRm(args ...string) error {
+	cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false)
+	cmd.Require(flag.Min, 1)
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+
+	status := 0
+	for _, net := range cmd.Args() {
+		if err := cli.client.NetworkRemove(net); err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			status = 1
+			continue
+		}
+	}
+	if status != 0 {
+		return Cli.StatusError{StatusCode: status}
+	}
+	return nil
+}
+
+// CmdNetworkConnect connects a container to a network
+//
+// Usage: docker network connect [OPTIONS] <NETWORK> <CONTAINER>
+func (cli *DockerCli) CmdNetworkConnect(args ...string) error {
+	cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false)
+	flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address")
+	flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address")
+	flLinks := opts.NewListOpts(runconfigopts.ValidateLink)
+	cmd.Var(&flLinks, []string{"-link"}, "Add link to another container")
+	flAliases := opts.NewListOpts(nil)
+	cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container")
+	cmd.Require(flag.Min, 2)
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+	epConfig := &network.EndpointSettings{
+		IPAMConfig: &network.EndpointIPAMConfig{
+			IPv4Address: *flIPAddress,
+			IPv6Address: *flIPv6Address,
+		},
+		Links:   flLinks.GetAll(),
+		Aliases: flAliases.GetAll(),
+	}
+	return cli.client.NetworkConnect(cmd.Arg(0), cmd.Arg(1), epConfig)
+}
+
+// CmdNetworkDisconnect disconnects a container from a network
+//
+// Usage: docker network disconnect <NETWORK> <CONTAINER>
+func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
+	cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false)
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network")
+	cmd.Require(flag.Exact, 2)
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+
+	return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1), *force)
+}
+
+// CmdNetworkLs lists all the networks managed by docker daemon
+//
+// Usage: docker network ls [OPTIONS]
+func (cli *DockerCli) CmdNetworkLs(args ...string) error {
+	cmd := Cli.Subcmd("network ls", nil, "Lists networks", true)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output")
+
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+
+	cmd.Require(flag.Exact, 0)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get processed after we get a response from the server.
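+	// Editor's note (illustrative, not part of the vendored file): as with
+	// `docker images`, each --filter flag is a KEY=VALUE pair (e.g.
+	// --filter name=bridge) folded into a single filters.Args value that
+	// accompanies the list request.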
+	netFilterArgs := filters.NewArgs()
+	for _, f := range flFilter.GetAll() {
+		if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil {
+			return err
+		}
+	}
+
+	options := types.NetworkListOptions{
+		Filters: netFilterArgs,
+	}
+
+	networkResources, err := cli.client.NetworkList(options)
+	if err != nil {
+		return err
+	}
+
+	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+
+	// unless quiet (-q) is specified, print field titles
+	if !*quiet {
+		fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER")
+	}
+
+	for _, networkResource := range networkResources {
+		ID := networkResource.ID
+		netName := networkResource.Name
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+		if *quiet {
+			fmt.Fprintln(wr, ID)
+			continue
+		}
+		driver := networkResource.Driver
+		fmt.Fprintf(wr, "%s\t%s\t%s\t",
+			ID,
+			netName,
+			driver)
+		fmt.Fprint(wr, "\n")
+	}
+	wr.Flush()
+	return nil
+}
+
+// CmdNetworkInspect inspects the network object for more details
+//
+// Usage: docker network inspect [OPTIONS] <NETWORK> [NETWORK...]
+func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
+	cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false)
+	tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template")
+	cmd.Require(flag.Min, 1)
+
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+
+	inspectSearcher := func(name string) (interface{}, []byte, error) {
+		i, err := cli.client.NetworkInspect(name)
+		return i, nil, err
+	}
+
+	return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher)
+}
+
+// Consolidates the ipam configuration as a group from different related configurations.
+// A user can configure a network with multiple non-overlapping subnets, and hence it is
+// possible to correlate the various related parameters and consolidate them.
+// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into
+// structured ipam data.
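+//
+// For example (illustrative, not from the vendored source): with
+//
+//	subnets  = ["10.0.0.0/24", "172.16.0.0/16"]
+//	gateways = ["10.0.0.1"]
+//	ranges   = ["172.16.1.0/24"]
+//
+// the result is two IPAMConfig entries, {Subnet: 10.0.0.0/24, Gateway: 10.0.0.1}
+// and {Subnet: 172.16.0.0/16, IPRange: 172.16.1.0/24}; each range and gateway is
+// matched to its containing subnet by subnetMatches.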
+func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
+	if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
+		return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet")
+	}
+	iData := map[string]*network.IPAMConfig{}
+
+	// Populate non-overlapping subnets into consolidation map
+	for _, s := range subnets {
+		for k := range iData {
+			ok1, err := subnetMatches(s, k)
+			if err != nil {
+				return nil, err
+			}
+			ok2, err := subnetMatches(k, s)
+			if err != nil {
+				return nil, err
+			}
+			if ok1 || ok2 {
+				return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported")
+			}
+		}
+		iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}}
+	}
+
+	// Validate and add valid ip ranges
+	for _, r := range ranges {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, r)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			if iData[s].IPRange != "" {
+				return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s)
+			}
+			d := iData[s]
+			d.IPRange = r
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for range %s", r)
+		}
+	}
+
+	// Validate and add valid gateways
+	for _, g := range gateways {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, g)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			if iData[s].Gateway != "" {
+				return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s)
+			}
+			d := iData[s]
+			d.Gateway = g
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for gateway %s", g)
+		}
+	}
+
+	// Validate and add aux-addresses
+	for key, aa := range auxaddrs {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, aa)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			iData[s].AuxAddress[key] = aa
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for aux-address %s", aa)
+		}
+	}
+
+	idl := []network.IPAMConfig{}
+	for _, v := range iData {
+		idl = append(idl, *v)
+	}
+	return idl, nil
+}
+
+func subnetMatches(subnet, data string) (bool, error) {
+	var (
+		ip net.IP
+	)
+
+	_, s, err := net.ParseCIDR(subnet)
+	if err != nil {
+		return false, fmt.Errorf("Invalid subnet %s : %v", subnet, err)
+	}
+
+	if strings.Contains(data, "/") {
+		ip, _, err = net.ParseCIDR(data)
+		if err != nil {
+			return false, fmt.Errorf("Invalid cidr %s : %v", data, err)
+		}
+	} else {
+		ip = net.ParseIP(data)
+	}
+
+	return s.Contains(ip), nil
+}
+
+func networkUsage() string {
+	networkCommands := map[string]string{
+		"create":     "Create a network",
+		"connect":    "Connect container to a network",
+		"disconnect": "Disconnect container from a network",
+		"inspect":    "Display detailed network information",
+		"ls":         "List all networks",
+		"rm":         "Remove a network",
+	}
+
+	help := "Commands:\n"
+
+	for cmd, description := range networkCommands {
+		help += fmt.Sprintf("  %-25.25s%s\n", cmd, description)
+	}
+
+	help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.")
+	return help
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go
new file mode 100644
index 00000000..dab4eb2e
--- /dev/null
+++
b/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerPause(name); err != nil { + errs = append(errs, fmt.Sprintf("Failed to pause container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go new file mode 100644 index 00000000..0dd3da45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go @@ -0,0 +1,59 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. +// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(cmd.Arg(0)) + if err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go new file mode 100644 index 00000000..c0589738 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go @@ -0,0 +1,87 @@ +package client + +import ( + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdPs outputs a list of Docker containers. 
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.NewArgs() + + cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") + since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") + before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + options := types.ContainerListOptions{ + All: *all, + Limit: *last, + Since: *since, + Before: *before, + Size: *size, + Filter: psFilterArgs, + } + + containers, err := cli.client.ContainerList(options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 && !*quiet { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := formatter.ContainerContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Size: *size, + Containers: containers, + } + + psCtx.Write() + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go new file mode 100644 index 00000000..19220b96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go @@ -0,0 +1,87 @@ +package client + +import ( + "errors" + "fmt" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPull pulls an image or a repository from the registry. 
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + addTrustedFlags(cmd, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + distributionRef, err := reference.ParseNamed(remote) + if err != nil { + return err + } + if *allTags && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !*allTags && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) + } + + var tag string + switch x := distributionRef.(type) { + case reference.Canonical: + tag = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + + ref := registry.ParseReference(tag) + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") + + if isTrusted() && !ref.HasDigest() { + // Check if tag is digest + return cli.trustedPull(repoInfo, ref, authConfig, requestPrivilege) + } + + return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) +} + +func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImagePull(options, requestPrivilege) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go new file mode 100644 index 00000000..9e4972c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go @@ -0,0 +1,74 @@ +package client + +import ( + "errors" + "io" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPush pushes an image or repository to the registry. 
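// Aside: CmdPull above selects what to pull with a type switch over the
// reference interface: a Canonical reference yields its digest, a NamedTagged
// one its tag, and a bare name falls back to the default tag. A
// self-contained sketch of that dispatch using hypothetical local types.
package main

import "fmt"

type ref interface{ Name() string }

type tagged struct{ name, tag string }
type canonical struct{ name, digest string }

func (t tagged) Name() string    { return t.name }
func (c canonical) Name() string { return c.name }

func pickTag(r ref) string {
	switch x := r.(type) {
	case canonical:
		return x.digest // pin to the content digest
	case tagged:
		return x.tag
	}
	return "latest" // name-only: default tag, as CmdPull reports to the user
}

func main() {
	fmt.Println(pickTag(tagged{"busybox", "1.27"}))
	fmt.Println(pickTag(canonical{"busybox", "sha256:ab12..."}))
}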
+// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) + addTrustedFlags(cmd, false) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(0)) + if err != nil { + return err + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") + if isTrusted() { + return cli.trustedPush(repoInfo, tag, authConfig, requestPrivilege) + } + + responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} + +func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + return cli.client.ImagePush(options, requestPrivilege) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go new file mode 100644 index 00000000..a67d5d02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. +// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := strings.TrimSpace(cmd.Arg(0)) + newName := strings.TrimSpace(cmd.Arg(1)) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := cli.client.ContainerRename(oldName, newName); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go new file mode 100644 index 00000000..245aca54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go @@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more containers. +// +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
+func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerRestart(name, *nSeconds); err != nil { + errs = append(errs, fmt.Sprintf("Failed to restart container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go new file mode 100644 index 00000000..a6c4d9d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go @@ -0,0 +1,48 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + options := types.ContainerRemoveOptions{ + ContainerID: name, + RemoveVolumes: *v, + RemoveLinks: *link, + Force: *force, + } + + if err := cli.client.ContainerRemove(options); err != nil { + errs = append(errs, fmt.Sprintf("Failed to remove container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go new file mode 100644 index 00000000..a190ab38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go @@ -0,0 +1,57 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...]
+func (cli *DockerCli) CmdRmi(args ...string) error { + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var errs []string + for _, name := range cmd.Args() { + options := types.ImageRemoveOptions{ + ImageID: name, + Force: *force, + PruneChildren: !*noprune, + } + + dels, err := cli.client.ImageRemove(options) + if err != nil { + errs = append(errs, fmt.Sprintf("Failed to remove image (%s): %s", name, err)) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) + } + } + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go new file mode 100644 index 00000000..3b3a1a2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go @@ -0,0 +1,284 @@ +package client + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/libnetwork/resolvconf/dns" +) + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +// if container start fails with 'command not found' error, return 127 +// if container start fails with 'command cannot be invoked' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ") + statusError := Cli.StatusError{} + derrCmdNotFound := derr.ErrorCodeCmdNotFound.Message() + derrCouldNotInvoke := derr.ErrorCodeCmdCouldNotBeInvoked.Message() + derrNoSuchImage := derr.ErrorCodeNoSuchImageHash.Message() + derrNoSuchImageTag := derr.ErrorCodeNoSuchImageTag.Message() + switch trimmedErr { + case derrCmdNotFound: + statusError = Cli.StatusError{StatusCode: 127} + case derrCouldNotInvoke: + statusError = Cli.StatusError{StatusCode: 126} + case derrNoSuchImage, derrNoSuchImageTag: + statusError = Cli.StatusError{StatusCode: 125} + default: + statusError = Cli.StatusError{StatusCode: 125} + } + return statusError +} + +// CmdRun runs a command in a new container. +// +// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
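// Aside: runStartContainerErr above maps daemon failures to the documented
// exit codes (127 command not found, 126 not invocable, 125 other daemon
// errors). Note that it strips the daemon prefix with strings.Trim, whose
// second argument Go treats as a character cutset rather than a phrase, so
// messages can be over-trimmed; strings.TrimPrefix is the usual intent. A
// stdlib demonstration of the difference:
package main

import (
	"fmt"
	"strings"
)

func main() {
	msg := "Error response from daemon: no such image"
	fmt.Println(strings.TrimPrefix(msg, "Error response from daemon: "))
	// -> "no such image": only the exact leading phrase is removed.
	fmt.Println(strings.Trim(msg, "Error response from daemon: "))
	// -> "uch imag": every leading/trailing byte found in the cutset is eaten.
}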
+func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) + addTrustedFlags(cmd, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(125) + } + + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if len(hostConfig.DNS) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.DNS { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + if config.Image == "" { + cmd.Usage() + return nil + } + + config.ArgsEscaped = false + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
+ if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + cmd.ReportError(err.Error(), true) + return runStartContainerErr(err) + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = cli.in + } + if config.AttachStdout { + out = cli.out + } + if config.AttachStderr { + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + if *flDetachKeys != "" { + cli.configFile.DetachKeys = *flDetachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: createResponse.ID, + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: cli.configFile.DetachKeys, + } + + resp, err := cli.client.ContainerAttach(options) + if err != nil { + return err + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) + }) + } + + defer func() { + if *flAutoRemove { + options := types.ContainerRemoveOptions{ + ContainerID: createResponse.ID, + RemoveVolumes: true, + } + if err := cli.client.ContainerRemove(options); err != nil { + fmt.Fprintf(cli.err, "Error deleting container: %s\n", err) + } + } + }() + + //start the container + if err := cli.client.ContainerStart(createResponse.ID); err != nil { + cmd.ReportError(err.Error(), false) + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
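// Aside: the attach path above runs holdHijackedConnection through
// promise.Go, which wraps a func() error in a goroutine and returns a channel
// carrying its eventual result. A stdlib-only equivalent; goPromise is a
// hypothetical name for the same idea.
package main

import (
	"errors"
	"fmt"
)

func goPromise(f func() error) chan error {
	ch := make(chan error, 1) // buffered so the goroutine never blocks on send
	go func() { ch <- f() }()
	return ch
}

func main() {
	errCh := goPromise(func() error { return errors.New("attach closed") })
	fmt.Println(<-errCh)
}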
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { + return runStartContainerErr(err) + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: Simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go new file mode 100644 index 00000000..e7e0166d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go @@ -0,0 +1,46 @@ +package client + +import ( + "errors" + "io" + "os" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + output = cli.out + err error + ) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + if *outfile != "" { + if output, err = os.Create(*outfile); err != nil { + return err + } + } + + responseBody, err := cli.client.ImageSave(cmd.Args()) + if err != nil { + return err + } + defer responseBody.Close() + + _, err = io.Copy(output, responseBody) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go new file mode 100644 index 00000000..64bbb67e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go @@ -0,0 +1,91 @@ +package client + +import ( + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// CmdSearch searches the Docker Hub for images. 
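// Aside: CmdSave above streams the archive to stdout unless -o names a file,
// and refuses to dump a raw tar onto a terminal. A stdlib-only sketch of that
// output selection; writeTo is a hypothetical helper.
package main

import (
	"io"
	"os"
	"strings"
)

func writeTo(outfile string, src io.Reader) error {
	var out io.Writer = os.Stdout // default: stream to stdout
	if outfile != "" {
		f, err := os.Create(outfile)
		if err != nil {
			return err
		}
		defer f.Close()
		out = f
	}
	_, err := io.Copy(out, src)
	return err
}

func main() {
	_ = writeTo("", strings.NewReader("archive bytes\n"))
}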
+// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + indexInfo, err := registry.ParseSearchIndexInfo(name) + if err != nil { + return err + } + + authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, indexInfo) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + Term: name, + RegistryAuth: encodedAuth, + } + + unorderedResults, err := cli.client.ImageSearch(options, requestPrivilege) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go new file mode 100644 index 00000000..3a2bc126 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go @@ -0,0 +1,149 @@ +package client + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.client.ContainerKill(cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more containers. +// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] 
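// Aside: searchResultsByStars above sorts results in descending order by
// implementing sort.Interface with the comparison indices swapped in Less.
// The same trick on a plain int slice:
package main

import (
	"fmt"
	"sort"
)

type descInts []int

func (d descInts) Len() int           { return len(d) }
func (d descInts) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d descInts) Less(i, j int) bool { return d[j] < d[i] } // reversed: descending

func main() {
	stars := descInts{3, 10, 1}
	sort.Sort(stars)
	fmt.Println(stars) // [10 3 1]
}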
+func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *attach || *openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + containerID := cmd.Arg(0) + c, err := cli.client.ContainerInspect(containerID) + if err != nil { + return err + } + + if !c.Config.Tty { + sigc := cli.forwardAllSignals(containerID) + defer signal.StopCatch(sigc) + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: containerID, + Stream: true, + Stdin: *openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + resp, err := cli.client.ContainerAttach(options) + if err != nil { + return err + } + defer resp.Close() + + cErr := promise.Go(func() error { + return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) + }) + + // 3. Start the container. + if err := cli.client.ContainerStart(containerID); err != nil { + return err + } + + // 4. Wait for attachment to break. + if c.Config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(containerID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, containerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } else { + // We're not going to attach to anything. + // Start as many containers as we want. 
+ return cli.startContainersWithoutAttachments(cmd.Args()) + } + + return nil +} + +func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { + var failedContainers []string + for _, containerID := range containerIDs { + if err := cli.client.ContainerStart(containerID); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + failedContainers = append(failedContainers, containerID) + } else { + fmt.Fprintf(cli.out, "%s\n", containerID) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go new file mode 100644 index 00000000..46f4c04b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go @@ -0,0 +1,344 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" + + Cli "github.com/docker/docker/cli" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + "github.com/docker/go-units" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + mu sync.RWMutex + err error +} + +type stats struct { + mu sync.Mutex + cs []*containerStats +} + +func (s *containerStats) Collect(cli *DockerCli, streamStats bool) { + responseBody, err := cli.client.ContainerStats(s.Name, streamStats) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + defer responseBody.Close() + + var ( + previousCPU uint64 + previousSystem uint64 + dec = json.NewDecoder(responseBody) + u = make(chan error, 1) + ) + go func() { + for { + var v *types.StatsJSON + if err := dec.Decode(&v); err != nil { + u <- err + return + } + + var memPercent = 0.0 + var cpuPercent = 0.0 + + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + blkRead, blkWrite := calculateBlockIO(v.BlkioStats) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) + s.BlockRead = float64(blkRead) + s.BlockWrite = float64(blkWrite) + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. 
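// Aside: the collector loop below pairs the decoding goroutine with a select
// on time.After(2s), so the displayed values are zeroed whenever no update
// arrives within the window. A stdlib-only reduction of that watchdog
// pattern:
package main

import (
	"fmt"
	"time"
)

func main() {
	updates := make(chan int)
	go func() { updates <- 42 }() // one update, then the sender goes quiet

	for i := 0; i < 2; i++ {
		select {
		case v := <-updates:
			fmt.Println("update:", v)
		case <-time.After(50 * time.Millisecond):
			fmt.Println("stale: zeroing displayed values")
		}
	}
}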
+ s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.MemoryLimit = 0 + s.NetworkRx = 0 + s.NetworkTx = 0 + s.BlockRead = 0 + s.BlockWrite = 0 + s.mu.Unlock() + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), + units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite)) + return nil +} + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats [OPTIONS] [CONTAINER...] +func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + + cmd.ParseFlags(args, true) + + names := cmd.Args() + showAll := len(names) == 0 + + if showAll { + options := types.ContainerListOptions{ + All: *all, + } + cs, err := cli.client.ContainerList(options) + if err != nil { + return err + } + for _, c := range cs { + names = append(names, c.ID[:12]) + } + } + if len(names) == 0 && !showAll { + return fmt.Errorf("No containers found") + } + sort.Strings(names) + + var ( + cStats = stats{} + w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + ) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n") + } + for _, n := range names { + s := &containerStats{Name: n} + // no need to lock here since only the main goroutine is running here + cStats.cs = append(cStats.cs, s) + go s.Collect(cli, !*noStream) + } + closeChan := make(chan error) + if showAll { + type watch struct { + cid string + event string + err error + } + getNewContainers := func(c chan<- watch) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + resBody, err := cli.client.Events(options) + if err != nil { + c <- watch{err: err} + return + } + defer resBody.Close() + + decodeEvents(resBody, func(event events.Message, err error) error { + if err != nil { + c <- watch{err: err} + return nil + } + + c <- watch{event.ID[:12], event.Action, nil} + return nil + }) + } + go func(stopChan chan<- error) { + cChan := make(chan watch) + go getNewContainers(cChan) + for { + c := <-cChan + if c.err != nil { + stopChan <- c.err + return + } + switch c.event { + case "create": + s := &containerStats{Name: c.cid} + cStats.mu.Lock() + cStats.cs = append(cStats.cs, s) + cStats.mu.Unlock() + go s.Collect(cli, !*noStream) + case "stop": + case "die": + if !*all { + var remove int + // cStats cannot be O(1) with a map cause ranging over it would cause + // containers in stats to move up and down in the list...:( + cStats.mu.Lock() + for i, s := range cStats.cs { + if s.Name == c.cid { + remove = i + break + } + } + cStats.cs = 
append(cStats.cs[:remove], cStats.cs[remove+1:]...) + cStats.mu.Unlock() + } + } + } + }(closeChan) + } else { + close(closeChan) + } + // do a quick pause so that any failed connections for containers that do not exist are able to be + // evicted before we display the initial or default values. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + cStats.mu.Lock() + for i, s := range cStats.cs { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) + } + if len(cStats.cs) == 0 && !showAll { + return nil + } + cStats.mu.Unlock() + w.Flush() + if *noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go new file mode 100644 index 00000000..9d429ea0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more containers. +// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
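// Aside: calculateCPUPercent above (in stats.go) scales the container's share
// of the system-wide CPU delta by the number of per-CPU samples. A worked
// example with made-up readings:
package main

import "fmt"

func cpuPercent(prevCPU, prevSys, curCPU, curSys uint64, ncpu int) float64 {
	cpuDelta := float64(curCPU) - float64(prevCPU)
	sysDelta := float64(curSys) - float64(prevSys)
	if sysDelta > 0 && cpuDelta > 0 {
		return (cpuDelta / sysDelta) * float64(ncpu) * 100.0
	}
	return 0.0
}

func main() {
	// The container consumed 2e9 of 8e9 system jiffies on a 4-CPU host: 100%.
	fmt.Println(cpuPercent(0, 0, 2e9, 8e9, 4))
}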
+func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerStop(name, *nSeconds); err != nil { + errs = append(errs, fmt.Sprintf("Failed to stop container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go new file mode 100644 index 00000000..5a824714 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go @@ -0,0 +1,44 @@ +package client + +import ( + "errors" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) + force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(1)) + if err != nil { + return err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + + options := types.ImageTagOptions{ + ImageID: cmd.Arg(0), + RepositoryName: ref.Name(), + Tag: tag, + Force: *force, + } + + return cli.client.ImageTag(options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go new file mode 100644 index 00000000..90e2d7d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go @@ -0,0 +1,39 @@ +package client + +import ( + "fmt" + "strings" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
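// Aside: CmdTop below, like CmdStats above, lays out columns with
// text/tabwriter: write tab-separated cells, then Flush aligns everything. A
// stdlib sketch using the same writer parameters:
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, "PID\tUSER\tCOMMAND")
	fmt.Fprintln(w, "1\troot\tnginx")
	w.Flush() // columns are only aligned once Flush runs
}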
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var arguments []string + if cmd.NArg() > 1 { + arguments = cmd.Args()[1:] + } + + procList, err := cli.client.ContainerTop(cmd.Arg(0), arguments) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go new file mode 100644 index 00000000..d06db5d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go @@ -0,0 +1,452 @@ +package client + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/distribution" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + apiclient "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary/client" + "github.com/docker/notary/passphrase" + "github.com/docker/notary/trustmanager" + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" + "github.com/docker/notary/tuf/store" +) + +var ( + releasesRole = path.Join(data.CanonicalTargetsRole, "releases") + untrusted bool +) + +func addTrustedFlags(fs *flag.FlagSet, verify bool) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) +} + +func isTrusted() bool { + return !untrusted +} + +type target struct { + reference registry.Reference + digest digest.Digest + size int64 +} + +func (cli *DockerCli) trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. 
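// Aside: addTrustedFlags above reads DOCKER_CONTENT_TRUST with
// strconv.ParseBool and deliberately treats any unparsable value as true. A
// stdlib sketch of that lenient parse; envBool is a hypothetical helper.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// envBool is false only when the variable is unset or spelled as a recognised
// "false" value; anything else that is set counts as true, mirroring the code
// above.
func envBool(name string) bool {
	e := os.Getenv(name)
	if e == "" {
		return false
	}
	t, err := strconv.ParseBool(e)
	return t || err != nil
}

func main() {
	os.Setenv("DOCKER_CONTENT_TRUST", "definitely")
	fmt.Println(envBool("DOCKER_CONTENT_TRUST")) // true
}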
+func (cli *DockerCli) certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +func trustServer(index *registrytypes.IndexInfo) (string, error) { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", fmt.Errorf("valid https URL required for trust server, got %s", s) + } + + return s, nil + } + if index.Official { + return registry.NotaryServer, nil + } + return "https://" + index.Name, nil +} + +type simpleCredentialStore struct { + auth types.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig) (*client.NotaryRepository, error) { + server, err := trustServer(repoInfo.Index) + if err != nil { + return nil, err + } + + var cfg = tlsconfig.ClientDefault + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := cli.certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + + challengeManager := auth.NewSimpleChallengeManager() + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + } + + creds := simpleCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), "push", "pull") + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) 
+ + return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.FullName(), server, tr, cli.getPassphraseRetriever()) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + reference: registry.ParseReference(t.Name), + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + "targets/releases": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets/releases": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + } + + // Backwards compatibility with old env names. We should remove this in 1.10 + if env["root"] == "" { + if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"); passphrase != "" { + env["root"] = passphrase + fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n") + } + } + if env["snapshot"] == "" || env["targets"] == "" || env["targets/releases"] == "" { + if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" { + env["snapshot"] = passphrase + env["targets"] = passphrase + env["targets/releases"] = passphrase + fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. 
Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n") + } + } + + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, err + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(ref, r.digest) +} + +func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error { + fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) + + options := types.ImageTagOptions{ + ImageID: trustedRef.String(), + RepositoryName: trustedRef.Name(), + Tag: ref.Tag(), + Force: true, + } + + return cli.client.ImageTag(options) +} + +func notaryError(repoName string, err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) + case signed.ErrExpired: + return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) + case *net.OpError: + return fmt.Errorf("Error: error contacting notary server: %v", err) + case store.ErrMetaNotFound: + return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) + case signed.ErrInvalidKeyType: + return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) + case signed.ErrNoKeys: + return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) + case signed.ErrLowVersion: + return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) + case signed.ErrRoleThreshold: + return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) + case client.ErrRepositoryNotExist: + return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) + case signed.ErrInsufficientSignatures: + return fmt.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) + } + + return err +} + +func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) + return err + } + + if ref.String() == "" { + // List all targets + targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole) + if err != nil { + return notaryError(repoInfo.FullName(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name()) + continue + } + refs = append(refs, t) + } + } else { + t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole) + if err != nil { + return notaryError(repoInfo.FullName(), err) + } + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.reference.String() + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) + + if err := cli.imagePullPrivileged(authConfig, repoInfo.Name(), r.digest.String(), requestPrivilege); err != nil { + return err + } + + // If reference is not trusted, tag by trusted reference + if !r.reference.HasDigest() { + tagged, err := reference.WithTag(repoInfo, r.reference.String()) + if err != nil { + return err + } + trustedRef, err := reference.WithDigest(repoInfo, r.digest) + if err != nil { + return err + } + if err := cli.tagTrusted(trustedRef, tagged); err != nil { + return err + } + } + } + return nil +} + +func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { + responseBody, err := cli.imagePushPrivileged(authConfig, repoInfo.Name(), tag, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + targets := []target{} + handleTarget := func(aux *json.RawMessage) { + var pushResult distribution.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { + targets = append(targets, target{ + reference: registry.ParseReference(pushResult.Tag), + digest: pushResult.Digest, + size: int64(pushResult.Size), + }) + } + } + + err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, handleTarget) + if err != nil { + return err + } + + if tag == "" { + fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n") + return nil + } + if len(targets) == 0 { + fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n") + return nil + } + + fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n") + + repo, err := cli.getNotaryRepository(repoInfo, authConfig) + if err != nil { + fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) + return err + } + + for _, target := range targets { + h, err := hex.DecodeString(target.digest.Hex()) + if err != nil { + return err + } + t := &client.Target{ + Name: target.reference.String(), + Hashes: data.Hashes{ + string(target.digest.Algorithm()): h, + }, + Length: 
int64(target.size), + } + if err := repo.AddTarget(t, releasesRole); err != nil { + return err + } + } + + err = repo.Publish() + if _, ok := err.(client.ErrRepoNotInitialized); !ok { + return notaryError(repoInfo.FullName(), err) + } + + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + if err := repo.Initialize(rootKeyID); err != nil { + return notaryError(repoInfo.FullName(), err) + } + fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName()) + + return notaryError(repoInfo.FullName(), repo.Publish()) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go new file mode 100644 index 00000000..9ec3310d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go @@ -0,0 +1,32 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerUnpause(name); err != nil { + errs = append(errs, fmt.Sprintf("Failed to unpause container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go new file mode 100644 index 00000000..764a9952 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go @@ -0,0 +1,104 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +// CmdUpdate updates resources of one or more containers. +// +// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
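// Aside: CmdUpdate below converts human-readable sizes such as "512m" with
// RAMInBytes from docker/go-units. A rough stdlib-only approximation for
// illustration only — binary multiples assumed, suffix handling simplified;
// ramInBytes here is hypothetical, not the library function.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func ramInBytes(size string) (int64, error) {
	mult := map[string]int64{"k": 1 << 10, "m": 1 << 20, "g": 1 << 30}
	s := strings.ToLower(strings.TrimSpace(size))
	for suffix, m := range mult {
		if strings.HasSuffix(s, suffix) {
			n, err := strconv.ParseInt(strings.TrimSuffix(s, suffix), 10, 64)
			return n * m, err
		}
	}
	return strconv.ParseInt(s, 10, 64)
}

func main() {
	v, _ := ramInBytes("512m")
	fmt.Println(v) // 536870912
}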
+func (cli *DockerCli) CmdUpdate(args ...string) error { + cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) + flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") + flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + if cmd.NFlag() == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var err error + var flMemory int64 + if *flMemoryString != "" { + flMemory, err = units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if *flMemoryReservation != "" { + memoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if *flKernelMemory != "" { + kernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return err + } + } + + resources := container.Resources{ + BlkioWeight: *flBlkioWeight, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUShares: *flCPUShares, + Memory: flMemory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: *flCPUPeriod, + CPUQuota: *flCPUQuota, + } + + updateConfig := container.UpdateConfig{ + Resources: resources, + } + + names := cmd.Args() + var errs []string + for _, name := range names { + if err := cli.client.ContainerUpdate(name, updateConfig); err != nil { + errs = append(errs, fmt.Sprintf("Failed to update container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go new file mode 100644 index 00000000..687add5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go @@ -0,0 +1,140 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "os" + gosignal "os/signal" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// encodeAuthToBase64 
serializes the auth configuration as JSON base64 payload +func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +func (cli *DockerCli) encodeRegistryAuth(index *registrytypes.IndexInfo) (string, error) { + authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, index) + return encodeAuthToBase64(authConfig) +} + +func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + if err := cli.CmdLogin(registry.GetAuthConfigKey(index)); err != nil { + return "", err + } + return cli.encodeRegistryAuth(index) + } +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + ID: id, + Height: height, + Width: width, + } + + var err error + if isExec { + err = cli.client.ContainerExecResize(options) + } else { + err = cli.client.ContainerResize(options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + c, err := cli.client.ContainerInspect(containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode performs an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + resp, err := cli.client.ContainerExecInspect(execID) + if err != nil { + // If we can't connect, then the daemon probably died.
+ if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go new file mode 100644 index 00000000..ff10ddde --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package client + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go new file mode 100644 index 00000000..09c33dad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package client + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go new file mode 100644 index 00000000..a64deef6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go @@ -0,0 +1,92 @@ +package client + +import ( + "runtime" + "text/template" + "time" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/engine-api/types" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +// CmdVersion shows Docker version information. 
+// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. +// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + templateFormat := versionTemplate + if *tmplStr != "" { + templateFormat = *tmplStr + } + + var tmpl *template.Template + if tmpl, err = template.New("").Funcs(funcMap).Parse(templateFormat); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: cli.client.ClientVersion(), + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + serverVersion, err := cli.client.ServerVersion() + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go new file mode 100644 index 00000000..284e30c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go @@ -0,0 +1,160 @@ +package client + +import ( + "fmt" + "text/tabwriter" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdVolume is the parent subcommand for all volume commands +// +// Usage: docker volume +func (cli *DockerCli) CmdVolume(args ...string) error { + description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" + commands := [][]string{ + {"create", "Create a volume"}, + {"inspect", "Return low-level information on a volume"}, + {"ls", "List volumes"}, + {"rm", "Remove a volume"}, + } + + for _, cmd := range commands { + description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) + } + + description += "\nRun 'docker volume COMMAND --help' for more information on a command" + cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdVolumeLs outputs a list of Docker volumes. 
+// +// Usage: docker volume ls [OPTIONS] +func (cli *DockerCli) CmdVolumeLs(args ...string) error { + cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) + + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) + if err != nil { + return err + } + } + + volumes, err := cli.client.VolumeList(volFilterArgs) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + for _, warn := range volumes.Warnings { + fmt.Fprintln(cli.err, warn) + } + fmt.Fprintf(w, "DRIVER \tVOLUME NAME") + fmt.Fprintf(w, "\n") + } + + for _, vol := range volumes.Volumes { + if *quiet { + fmt.Fprintln(w, vol.Name) + continue + } + fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) + } + w.Flush() + return nil +} + +// CmdVolumeInspect displays low-level information on one or more volumes. +// +// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeInspect(args ...string) error { + cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + if err := cmd.Parse(args); err != nil { + return nil + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.VolumeInspect(name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// CmdVolumeCreate creates a new volume. +// +// Usage: docker volume create [OPTIONS] +func (cli *DockerCli) CmdVolumeCreate(args ...string) error { + cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) + flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") + flName := cmd.String([]string{"-name"}, "", "Specify volume name") + + flDriverOpts := opts.NewMapOpts(nil, nil) + cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volReq := types.VolumeCreateRequest{ + Driver: *flDriver, + DriverOpts: flDriverOpts.GetAll(), + Name: *flName, + } + + vol, err := cli.client.VolumeCreate(volReq) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", vol.Name) + return nil +} + +// CmdVolumeRm removes one or more volumes. +// +// Usage: docker volume rm VOLUME [VOLUME...] 
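[Editor's note] CmdVolumeRm below uses the same per-argument pattern as CmdUnpause and CmdWait earlier in this diff: try the operation on every name, report each failure as it happens, and only decide the overall exit status at the end. A reduced standalone sketch of the pattern (remove is a hypothetical stand-in for cli.client.VolumeRemove):

package main

import (
	"errors"
	"fmt"
	"os"
)

// remove is a stand-in for a client call such as cli.client.VolumeRemove.
func remove(name string) error {
	if name == "busy" {
		return errors.New("volume is in use")
	}
	return nil
}

func main() {
	status := 0
	for _, name := range []string{"data", "busy", "cache"} {
		if err := remove(name); err != nil {
			fmt.Fprintln(os.Stderr, err) // report and keep going
			status = 1
			continue
		}
		fmt.Println(name)
	}
	os.Exit(status) // non-zero if any removal failed
}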
+func (cli *DockerCli) CmdVolumeRm(args ...string) error { + cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + var status = 0 + + for _, name := range cmd.Args() { + if err := cli.client.VolumeRemove(name); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(cli.out, "%s\n", name) + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go new file mode 100644 index 00000000..d77a523e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + status, err := cli.client.ContainerWait(name) + if err != nil { + errs = append(errs, fmt.Sprintf("Failed to wait container (%s): %s", name, err)) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/common.go b/Godeps/_workspace/src/github.com/docker/docker/api/common.go new file mode 100644 index 00000000..51be1e27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/common.go @@ -0,0 +1,149 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Version of Current REST API + DefaultVersion version.Version = "1.23" + + // MinVersion represents Minimum REST API version supported + MinVersion version.Version = "1.12" + + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first int + last int + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last int) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(start) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile b/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile new file mode 100644 index 00000000..322f2544 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 00000000..20188c12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to BoolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 00000000..ecf26e2a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,178 @@ +package httputils + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/version" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
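[Editor's note] The form helpers in form.go above treat "", "0", "no", "false" and "none" (case-insensitively, after trimming) as false and every other value as true. A quick standalone check of that rule, re-implementing it inline rather than importing the vendored package:

package main

import (
	"fmt"
	"strings"
)

// boolValue mirrors the rule used by httputils.BoolValue above.
func boolValue(s string) bool {
	s = strings.ToLower(strings.TrimSpace(s))
	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}

func main() {
	for _, v := range []string{"", "0", "no", "false", "none", "1", "true", "anything"} {
		fmt.Printf("%q -> %v\n", v, boolValue(v))
	}
}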
+type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list of http streams is properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// ParseMultipartForm ensures the request form is parsed, even with invalid content types. +func ParseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// WriteError decodes a specific docker error and sends it in the response. +func WriteError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + + statusCode := http.StatusInternalServerError + errMsg := err.Error() + + // Based on the type of error we get we need to process things + // slightly differently to extract the error message. + // In the 'errcode.*' cases there are two different types of + // error that could be returned. errcode.ErrorCode is the base + // type of error object - it is just an 'int' that can then be + // used as the look-up key to find the message. errcode.Error + // extends errcode.ErrorCode by adding error-instance specific + // data, like 'details' or variable strings to be inserted into + // the message. + // + // Ideally, we should just be able to call err.Error() for all + // cases but the errcode package doesn't support that yet. + // + // Additionally, in both errcode cases, there might be an http + // status code associated with it, and if so use it. + switch err.(type) { + case errcode.ErrorCode: + daError, _ := err.(errcode.ErrorCode) + statusCode = daError.Descriptor().HTTPStatusCode + errMsg = daError.Message() + + case errcode.Error: + // For reference, if you're looking for a particular error + // then you can do something like : + // import ( derr "github.com/docker/docker/errors" ) + // if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ...
} + + daError, _ := err.(errcode.Error) + statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode + errMsg = daError.Message + + default: + // This part will be removed once we've + // converted everything over to use the errcode package + + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, + // we should create appropriate error types with clearly defined meaning + errStr := strings.ToLower(err.Error()) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + http.Error(w, errMsg, statusCode) +} + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return json.NewEncoder(w).Encode(v) +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) (ver version.Version) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(version.Version) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 00000000..c978478e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,195 @@ +package server + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +// middleware is an adapter to allow the use of ordinary functions as Docker API filters. +// Any function that has the appropriate signature can be registered as a middleware.
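[Editor's note] The middleware type declared below is a function that wraps one APIFunc-shaped handler in another; handleWithGlobalMiddlewares further down chains them with a simple loop, so the middleware applied last ends up outermost and runs first. A self-contained illustration with a simplified handler signature (names are hypothetical):

package main

import "fmt"

type handler func()
type middleware func(handler) handler

// tag returns a middleware that logs around the wrapped handler.
func tag(name string) middleware {
	return func(next handler) handler {
		return func() {
			fmt.Println("enter", name)
			next()
			fmt.Println("leave", name)
		}
	}
}

func main() {
	h := handler(func() { fmt.Println("handler") })
	// Same loop shape as handleWithGlobalMiddlewares: each iteration wraps
	// the previous result, so "outer" runs first because it is applied last.
	for _, m := range []middleware{tag("inner"), tag("outer")} {
		h = m(h)
	}
	h() // enter outer, enter inner, handler, leave inner, leave outer
}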
+type middleware func(handler httputils.APIFunc) httputils.APIFunc + +// debugRequestMiddleware dumps the request to the logger +func debugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("%s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + if _, exists := postForm["password"]; exists { + postForm["password"] = "*****" + } + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +// authorizationMiddleware performs authorization on the request. +func (s *Server) authorizationMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // FIXME: fill when authN gets in + // User and UserAuthNMethod are taken from AuthN plugins + // Currently tracked in https://github.com/docker/docker/pull/13994 + user := "" + userAuthNMethod := "" + authCtx := authorization.NewCtx(s.authZPlugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := authorization.NewResponseModifier(w) + + if err := handler(ctx, rw, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if err := authCtx.AuthZResponse(rw, r); err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + return nil + } +} + +// userAgentMiddleware checks the User-Agent header looking for a valid docker client spec. +func (s *Server) userAgentMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + dockerVersion := version.Version(s.cfg.Version) + + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + return handler(ctx, w, r, vars) + } +} + +// corsMiddleware sets the CORS header expectations in the server.
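[Editor's note] userAgentMiddleware above splits a "Docker-Client/<version>" User-Agent on "/" and, for v1.20+ clients, trims the trailing " (GOOS)" suffix before comparing versions. A standalone sketch of just that parsing step:

package main

import (
	"fmt"
	"strings"
)

// clientVersion extracts the version from a Docker client User-Agent,
// following the same split-and-trim steps as the middleware above.
func clientVersion(userAgent string) (string, bool) {
	if !strings.Contains(userAgent, "Docker-Client/") {
		return "", false
	}
	parts := strings.Split(userAgent, "/")
	if len(parts) != 2 {
		return "", false
	}
	// v1.20 onwards appends the client GOOS, e.g. "Docker-Client/1.7.0 (linux)"
	return strings.Split(parts[1], " ")[0], true
}

func main() {
	for _, ua := range []string{"Docker-Client/1.7.0 (linux)", "curl/7.43.0"} {
		v, ok := clientVersion(ua)
		fmt.Printf("%q -> %q %v\n", ua, v, ok)
	}
}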
+func (s *Server) corsMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all header values will be passed to the HTTP handler + corsHeaders := s.cfg.CorsHeaders + if corsHeaders == "" && s.cfg.EnableCors { + corsHeaders = "*" + } + + if corsHeaders != "" { + writeCorsHeaders(w, r, corsHeaders) + } + return handler(ctx, w, r, vars) + } +} + +// versionMiddleware checks the api version requirements before passing the request to the server handler. +func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := version.Version(vars["version"]) + if apiVersion == "" { + apiVersion = api.DefaultVersion + } + + if apiVersion.GreaterThan(api.DefaultVersion) { + return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.DefaultVersion) + } + if apiVersion.LessThan(api.MinVersion) { + return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.DefaultVersion) + } + + w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")") + ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) + return handler(ctx, w, r, vars) + } +} + +// handleWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +// +// Example: handleWithGlobalMiddlewares(s.getContainersName) +// +// s.loggingMiddleware( +// s.userAgentMiddleware( +// s.corsMiddleware( +// versionMiddleware(s.getContainersName) +// ) +// ) +// ) +func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + middlewares := []middleware{ + versionMiddleware, + s.corsMiddleware, + s.userAgentMiddleware, + } + + // Only want this on debug level + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + middlewares = append(middlewares, debugRequestMiddleware) + } + + if len(s.cfg.AuthorizationPluginNames) > 0 { + s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames) + middlewares = append(middlewares, s.authorizationMiddleware) + } + + h := handler + for _, m := range middlewares { + h = m(h) + } + return h +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 00000000..766462bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,38 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +func profilerSetup(mainRouter *mux.Router, path string) { + var r = mainRouter.PathPrefix(path).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as it is not public.
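[Editor's note] expVars below re-implements the stdlib's unexported expvar handler: every variable registered through the expvar package is written out as one member of a JSON object. A minimal standalone illustration of the expvar side of that contract:

package main

import (
	"expvar"
	"fmt"
)

func main() {
	hits := expvar.NewInt("hits") // registers "hits" in the global expvar map
	hits.Add(3)
	// The same iteration expVars uses; it also visits the predeclared
	// "cmdline" and "memstats" variables.
	expvar.Do(func(kv expvar.KeyValue) {
		fmt.Printf("%q: %s\n", kv.Key, kv.Value)
	})
}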
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 00000000..fd9e314a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,12 @@ +package build + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. + // + // TODO: make this return a reference instead of string + Build() (imageID string) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 00000000..e21b6346 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,33 @@ +package build + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" + "github.com/docker/docker/daemon" +) + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend *daemon.Daemon + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b *daemon.Daemon) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + local.NewPostRoute("/build", r.postBuild), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 00000000..5cda79c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,260 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/daemon/daemonbuilder" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/docker/utils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. 
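[Editor's note] sanitizeRepoAndTags below leans on the vendored reference package: ParseNamed validates the repository name, WithDefaultTag pins untagged names to the default "latest" tag, and digest (Canonical) references are rejected. A standalone sketch of the parsing calls, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/docker/docker/reference"
)

func main() {
	for _, name := range []string{"busybox", "example.com/app:v1"} {
		ref, err := reference.ParseNamed(name)
		if err != nil {
			fmt.Println(name, "->", err)
			continue
		}
		// Untagged names such as "busybox" become "busybox:latest".
		ref = reference.WithDefaultTag(ref)
		fmt.Println(name, "->", ref.String())
	}
}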
+func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.IsolationLevel(r.FormValue("isolation")); i != "" { + if !container.IsolationLevel.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.IsolationLevel = i + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]string{} + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + return options, nil +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + 
// for a pull it is not an error if no auth was given; + // to increase compatibility with the existing api it defaults + // to being empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents writing a 200 (OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + + repoAndTags, err := sanitizeRepoAndTags(r.Form["t"]) + if err != nil { + return errf(err) + } + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + var ( + context builder.ModifiableContext + dockerfileName string + ) + context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) + if err != nil { + return errf(err) + } + defer func() { + if err := context.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + + b, err := dockerfile.NewBuilder( + buildOptions, // result of newBuildConfig + &daemonbuilder.Docker{br.backend}, + builder.DockerIgnoreContext{ModifiableContext: context}, + nil) + if err != nil { + return errf(err) + } + if buildOptions.SuppressOutput { + b.Output = notVerboseBuffer + } else { + b.Output = output + } + b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf} + if buildOptions.SuppressOutput { + b.Stdout = &streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} + b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} + } + + if closeNotifier, ok := w.(http.CloseNotifier); ok { + finished := make(chan struct{}) + defer close(finished) + clientGone := closeNotifier.CloseNotify() + go func() { + select { + case <-finished: + case <-clientGone: + logrus.Infof("Client disconnected, cancelling job: build") + b.Cancel() + } + }() + } + + imgID, err := b.Build() + if err != nil { + return errf(err) + } + + for _, rt := range repoAndTags { + if err := br.backend.TagImage(rt, imgID); err != nil { + return errf(err) + } + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout.
+ if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 00000000..80bed644 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,74 @@ +package container + +import ( + "io" + "time" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*exec.Config, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig) error + ContainerStop(name string, seconds int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) + ContainerWait(name string, timeout time.Duration) (int, error) + Exists(id string) bool +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version version.Version) (interface{}, error) + ContainerLogs(name string, config *daemon.ContainerLogsConfig) error + ContainerStats(name string, config *daemon.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *daemon.ContainersConfig) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. 
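[Editor's note] The Backend interface declared at the end of this file is assembled by embedding the narrower per-concern interfaces (exec, copy, state, monitor, attach), so the daemon satisfies it by implementing every method set. A reduced standalone sketch of that composition pattern (hypothetical method names):

package main

import "fmt"

type stateBackend interface {
	Start(name string) error
}

type monitorBackend interface {
	Top(name string) (string, error)
}

// Backend composes the narrower interfaces, mirroring the vendored file.
type Backend interface {
	stateBackend
	monitorBackend
}

type daemon struct{}

func (daemon) Start(name string) error         { return nil }
func (daemon) Top(name string) (string, error) { return "PID CMD", nil }

func main() {
	var b Backend = daemon{} // compiles only if daemon covers both method sets
	fmt.Println(b.Top("web"))
}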
+type attachBackend interface { + ContainerAttachWithLogs(name string, c *daemon.ContainerAttachWithLogsConfig) error + ContainerWsAttachWithLogs(name string, c *daemon.ContainerWsAttachWithLogsConfig) error +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 00000000..dd931650 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,66 @@ +package container + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" +) + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend) router.Router { + r := &containerRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + local.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + local.NewGetRoute("/containers/json", r.getContainersJSON), + local.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + local.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + local.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + local.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + local.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), + local.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), + local.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + local.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + local.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + local.NewPostRoute("/containers/create", r.postContainersCreate), + local.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + local.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + local.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + local.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + local.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + local.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + local.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + local.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + local.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + local.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), + local.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + local.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + local.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + local.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + local.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + 
// PUT + local.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + local.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 00000000..4e2ffca2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,503 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/daemon" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + timetypes "github.com/docker/engine-api/types/time" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + config := &daemon.ContainersConfig{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: r.Form.Get("filters"), + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + var out io.Writer + if !stream { + w.Header().Set("Content-Type", "application/json") + out = w + } else { + wf := ioutils.NewWriteFlusher(w) + out = wf + defer wf.Close() + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &daemon.ContainerStatsConfig{ + Stream: stream, + OutStream: out, + Stop: closeNotifier, + Version: httputils.VersionFromContext(ctx), + } + + return s.backend.ContainerStats(vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. 
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + var since time.Time + if r.Form.Get("since") != "" { + s, n, err := timetypes.ParseTimestamps(r.Form.Get("since"), 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + containerName := vars["name"] + + if !s.backend.Exists(containerName) { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + + // write an empty chunk of data (this is to ensure that the + // HTTP Response is sent immediately, even if the container has + // not yet produced any data) + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + logsConfig := &daemon.ContainerLogsConfig{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: since, + Tail: r.Form.Get("tail"), + UseStdout: stdout, + UseStderr: stderr, + OutStream: output, + Stop: closeNotifier, + } + + if err := s.backend.ContainerLogs(containerName, logsConfig); err != nil { + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %s\n", utils.GetErrorMessage(err)) + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *container.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it.
Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + theErr, isDerr := err.(errcode.ErrorCoder) + isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. + version := httputils.VersionFromContext(ctx) + if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, utils.GetErrorMessage(err)) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + 
} + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + } + + name := vars["name"] + warnings, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ + Warnings: warnings, + }) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := version.LessThan("1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such container: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + + keys := []byte{} + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ + Hijacker: w.(http.Hijacker), + Upgrade: upgrade, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + } + + return s.backend.ContainerAttachWithLogs(containerName, attachWithLogsConfig) +} + +func (s *containerRouter) 
wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + if !s.backend.Exists(containerName) { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + + var keys []byte + var err error + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + + wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ + InStream: ws, + OutStream: ws, + ErrStream: ws, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + } + + if err := s.backend.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil { + logrus.Errorf("Error attaching websocket: %s", utils.GetErrorMessage(err)) + } + }) + ws := websocket.Server{Handler: h, Handshake: nil} + ws.ServeHTTP(w, r) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go new file mode 100644 index 00000000..69584b31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go @@ -0,0 +1,112 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// setContainerPathStatHeader encodes the stat to JSON, base64-encodes it, and places it in a header.
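// A hedged sketch of the client side of the helper below: the stat structure
// is JSON-marshaled, base64-encoded, and shipped in the
// X-Docker-Container-Path-Stat header, so a caller reverses those two steps.
// The pathStat struct here is a trimmed stand-in assumed to approximate
// engine-api's types.ContainerPathStat, not the exact vendored type:
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"time"
)

// pathStat mirrors the fields a HEAD /containers/{name}/archive response is
// expected to carry (field set assumed for illustration).
type pathStat struct {
	Name       string      `json:"name"`
	Size       int64       `json:"size"`
	Mode       os.FileMode `json:"mode"`
	Mtime      time.Time   `json:"mtime"`
	LinkTarget string      `json:"linkTarget"`
}

// decodeContainerPathStat reverses setContainerPathStatHeader: base64-decode,
// then JSON-unmarshal.
func decodeContainerPathStat(h http.Header) (*pathStat, error) {
	data, err := base64.StdEncoding.DecodeString(h.Get("X-Docker-Container-Path-Stat"))
	if err != nil {
		return nil, err
	}
	var st pathStat
	if err := json.Unmarshal(data, &st); err != nil {
		return nil, err
	}
	return &st, nil
}

func main() {
	// Round-trip demo against a hand-built header.
	h := http.Header{}
	payload, _ := json.Marshal(pathStat{Name: "etc", Size: 4096})
	h.Set("X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(payload))
	st, err := decodeContainerPathStat(h)
	fmt.Println(st, err)
}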
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 00000000..caa5da06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,135 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/utils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. 
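// An aside on the request this exec-create handler is decoding: the client
// POSTs a JSON body shaped like types.ExecConfig to /containers/{name}/exec.
// A hedged, self-contained sketch of building such a body (field set trimmed
// and assumed for illustration; not the full engine-api type):
package main

import (
	"encoding/json"
	"fmt"
)

type execConfig struct {
	Cmd          []string
	AttachStdout bool
	AttachStderr bool
	Tty          bool
}

func main() {
	body, err := json.Marshal(execConfig{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		panic(err)
	}
	// Sent as POST /containers/{name}/exec with Content-Type: application/json;
	// the handler above rejects a missing or empty Cmd.
	fmt.Println(string(body))
}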
+ id, err := s.backend.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %s", name, utils.GetErrorMessage(err)) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if version.GreaterThan("1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } else { + outStream = w + } + + // Now run the user process in container. + if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + logrus.Errorf("Error running exec in container: %v\n", utils.GetErrorMessage(err)) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 00000000..e3bb09a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects containers configuration and serializes it as json. 
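// Back on postContainerExecStart above: when no TTY is allocated, stdout and
// stderr are interleaved on a single connection using the stdcopy framing.
// A hedged sketch of both ends of that framing, using the same stdcopy
// package the exec handler imports:
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Server side: wrap one stream with per-descriptor framed writers.
	var wire bytes.Buffer
	outW := stdcopy.NewStdWriter(&wire, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&wire, stdcopy.Stderr)
	outW.Write([]byte("result line\n"))
	errW.Write([]byte("warning line\n"))

	// Client side: demultiplex the framed stream back into two writers.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &wire); err != nil {
		panic(err)
	}
	fmt.Printf("stdout: %q\nstderr: %q\n", stdout.String(), stderr.String())
}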
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go new file mode 100644 index 00000000..c1d1e830 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go @@ -0,0 +1,375 @@ +package local + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder/dockerfile" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. 
+ return err + } + if c == nil { + c = &container.Config{} + } + + if !s.daemon.Exists(cname) { + return derr.ErrorCodeNoSuchContainer.WithArgs(cname) + } + + newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) + if err != nil { + return err + } + + commitCfg := &types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: newConfig, + MergeConfigs: true, + } + + imgID, err := s.daemon.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + var ( + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + var ref reference.Named + ref, err = reference.ParseNamed(image) + if err == nil { + if tag != "" { + // The "tag" could actually be a digest. 
+ var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(ref, dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + } + if err == nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + err = s.daemon.PullImage(ref, metaHeaders, authConfig, output) + } + } + } else { //import + var newRef reference.Named + if repo != "" { + var err error + newRef, err = reference.ParseNamed(repo) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + src := r.Form.Get("fromSrc") + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *container.Config + newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) + if err != nil { + return err + } + + err = s.daemon.ImportImage(src, newRef, message, r.Body, output, newConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *router) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + ref, err := reference.ParseNamed(vars["name"]) + if err != nil { + return err + } + tag := r.Form.Get("tag") + if tag != "" { + // Push by digest is not supported, so only tags are supported. 
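// An aside on the X-Registry-Auth header that both the pull handler and this
// push handler decode above: it carries a JSON auth config, URL-safe
// base64-encoded. A hedged client-side sketch of producing that header
// (field names assumed to match engine-api's types.AuthConfig JSON tags):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	buf, err := json.Marshal(authConfig{
		Username:      "someuser",
		Password:      "secret",
		ServerAddress: "https://index.docker.io/v1/",
	})
	if err != nil {
		panic(err)
	}
	// The handlers decode with base64.URLEncoding, so encode the same way.
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
}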
+ ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.daemon.PushImage(ref, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *router) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.daemon.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *router) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.LoadImage(r.Body, w) +} + +func (s *router) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.daemon.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *router) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.daemon.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *router) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // FIXME: The filter parameter could just be a match filter + images, err := s.daemon.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *router) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.daemon.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *router) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + repo := r.Form.Get("repo") + tag := r.Form.Get("tag") + newTag, err := reference.WithName(repo) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + if err := s.daemon.TagImage(newTag, vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *router) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := 
httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.daemon.SearchRegistryForImages(r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go new file mode 100644 index 00000000..ed07f977 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go @@ -0,0 +1,107 @@ +package local + +import ( + "github.com/docker/docker/api/server/httputils" + dkrouter "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon" +) + +// router is a docker router that talks with the local docker daemon. +type router struct { + daemon *daemon.Daemon + routes []dkrouter.Route +} + +// localRoute defines an individual API route to connect with the docker daemon. +// It implements router.Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("HEAD", path, handler) +} + +// NewRouter initializes a local router with a new daemon.
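// Before NewRouter below ties these helpers to a concrete daemon, a condensed,
// self-contained sketch of the same (method, path, handler) pattern with the
// route type inlined (simplified stand-ins, not the vendored types):
package main

import (
	"fmt"
	"net/http"
)

type apiFunc func(w http.ResponseWriter, r *http.Request) error

type route struct {
	method, path string
	handler      apiFunc
}

func newGetRoute(path string, h apiFunc) route  { return route{"GET", path, h} }
func newPostRoute(path string, h apiFunc) route { return route{"POST", path, h} }

func main() {
	// A route table built the same way initRoutes assembles its []Route.
	routes := []route{
		newGetRoute("/images/json", func(w http.ResponseWriter, r *http.Request) error {
			_, err := fmt.Fprintln(w, "[]")
			return err
		}),
		newPostRoute("/commit", func(w http.ResponseWriter, r *http.Request) error {
			w.WriteHeader(http.StatusCreated)
			return nil
		}),
	}
	for _, rt := range routes {
		fmt.Printf("registered %s %s\n", rt.method, rt.path)
	}
}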
+func NewRouter(daemon *daemon.Daemon) dkrouter.Router { + r := &router{ + daemon: daemon, + } + r.initRoutes() + return r +} + +// Routes returns the list of routes registered in the router. +func (r *router) Routes() []dkrouter.Route { + return r.routes +} + +// initRoutes initializes the routes in this router +func (r *router) initRoutes() { + r.routes = []dkrouter.Route{ + // OPTIONS + // GET + NewGetRoute("/images/json", r.getImagesJSON), + NewGetRoute("/images/search", r.getImagesSearch), + NewGetRoute("/images/get", r.getImagesGet), + NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + NewPostRoute("/commit", r.postCommit), + NewPostRoute("/images/create", r.postImagesCreate), + NewPostRoute("/images/load", r.postImagesLoad), + NewPostRoute("/images/{name:.*}/push", r.postImagesPush), + NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + // DELETE + NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go new file mode 100644 index 00000000..c6ea0adc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "github.com/docker/engine-api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented to provide +// network specific functionality +type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetwork(idName string, by int) (libnetwork.Network, error) + GetNetworksByID(partialID string) []libnetwork.Network + GetAllNetworks() []libnetwork.Network + CreateNetwork(name, driver string, ipam network.IPAM, + options map[string]string, internal bool) (libnetwork.Network, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, + network libnetwork.Network, force bool) error + NetworkControllerEnabled() bool + DeleteNetwork(name string) error +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go new file mode 100644 index 00000000..31d8d0c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go @@ -0,0 +1,110 @@ +package network + +import ( + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types/filters" + "github.com/docker/libnetwork" +) + +type filterHandler func([]libnetwork.Network, string) ([]libnetwork.Network, error) + +var ( + // supportedFilters predefined some supported filter handler function + supportedFilters = map[string]filterHandler{ + "type": filterNetworkByType, + "name": filterNetworkByName, + "id": filterNetworkByID, + } + + // acceptFilters is an acceptable filter flag list + // generated for validation. e.g. 
+ // acceptedFilters = map[string]bool{ + // "type": true, + // "name": true, + // "id": true, + // } + acceptedFilters = func() map[string]bool { + ret := make(map[string]bool) + for k := range supportedFilters { + ret[k] = true + } + return ret + }() +) + +func filterNetworkByType(nws []libnetwork.Network, netType string) (retNws []libnetwork.Network, err error) { + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name()) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name()) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +func filterNetworkByName(nws []libnetwork.Network, name string) (retNws []libnetwork.Network, err error) { + for _, nw := range nws { + // exact match (fast path) + if nw.Name() == name { + retNws = append(retNws, nw) + continue + } + + // regexp match (slow path) + match, err := regexp.MatchString(name, nw.Name()) + if err != nil || !match { + continue + } else { + retNws = append(retNws, nw) + } + } + return retNws, nil +} + +func filterNetworkByID(nws []libnetwork.Network, id string) (retNws []libnetwork.Network, err error) { + for _, nw := range nws { + if strings.HasPrefix(nw.ID(), id) { + retNws = append(retNws, nw) + } + } + return retNws, nil +} + +// filterNetworks filters the network list according to the user-specified filter +// and returns the chosen networks +func filterNetworks(nws []libnetwork.Network, filter filters.Args) ([]libnetwork.Network, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + var displayNet []libnetwork.Network + for fkey, fhandler := range supportedFilters { + errFilter := filter.WalkValues(fkey, func(fval string) error { + passList, err := fhandler(nws, fval) + if err != nil { + return err + } + displayNet = append(displayNet, passList...)
+ return nil + }) + if errFilter != nil { + return nil, errFilter + } + } + return displayNet, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go new file mode 100644 index 00000000..5fcb252c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go @@ -0,0 +1,56 @@ +package network + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" + "github.com/docker/docker/errors" + "golang.org/x/net/context" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend) router.Router { + r := &networkRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + local.NewGetRoute("/networks", r.controllerEnabledMiddleware(r.getNetworksList)), + local.NewGetRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.getNetwork)), + // POST + local.NewPostRoute("/networks/create", r.controllerEnabledMiddleware(r.postNetworkCreate)), + local.NewPostRoute("/networks/{id:.*}/connect", r.controllerEnabledMiddleware(r.postNetworkConnect)), + local.NewPostRoute("/networks/{id:.*}/disconnect", r.controllerEnabledMiddleware(r.postNetworkDisconnect)), + // DELETE + local.NewDeleteRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.deleteNetwork)), + } +} + +func (r *networkRouter) controllerEnabledMiddleware(handler httputils.APIFunc) httputils.APIFunc { + if r.backend.NetworkControllerEnabled() { + return handler + } + return networkControllerDisabled +} + +func networkControllerDisabled(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return errors.ErrorNetworkControllerNotEnabled.WithArgs() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go new file mode 100644 index 00000000..85fa88db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go @@ -0,0 +1,265 @@ +package network + +import ( + "encoding/json" + "fmt" + "net/http" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/daemon" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" + "github.com/docker/libnetwork" +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + if netFilters.Len() != 0 { + if err := netFilters.Validate(acceptedFilters); err != nil { + return err + } + } + + list := []*types.NetworkResource{} + + nwList := n.backend.GetAllNetworks() + displayable, err := 
filterNetworks(nwList, netFilters) + if err != nil { + return err + } + + for _, nw := range displayable { + list = append(list, buildNetworkResource(nw)) + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw)) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreate + var warning string + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(create.Name) { + return httputils.WriteJSON(w, http.StatusForbidden, + fmt.Sprintf("%s is a pre-defined network and cannot be created", create.Name)) + } + + nw, err := n.backend.GetNetwork(create.Name, daemon.NetworkByName) + if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { + return err + } + if nw != nil { + if create.CheckDuplicate { + return libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options, create.Internal) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.NetworkCreateResponse{ + ID: nw.ID(), + Warning: warning, + }) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, nw.Name(), connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return n.backend.DeleteNetwork(vars["id"]) +} + +func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + r.Name = nw.Name() + r.ID = nw.ID() + r.Scope = nw.Info().Scope() + r.Driver = nw.Type() + r.Options = nw.Info().DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, nw) + + epl := nw.Endpoints() + 
for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + if sb == nil { + continue + } + + r.Containers[sb.ContainerID()] = buildEndpointResource(e) + } + return r +} + +func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) { + id, opts, ipv4conf, ipv6conf := nw.Info().IpamConfig() + + ipv4Info, ipv6Info := nw.Info().IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { + er := types.EndpointResource{} + if e == nil { + return er + } + + er.EndpointID = e.ID() + er.Name = e.Name() + ei := e.Info() + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 00000000..f3efa82f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,18 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to.
+ Path() string +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 00000000..8a270027 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,17 @@ +package system + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// Backend is all the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(authConfig *types.AuthConfig) (string, error) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 00000000..0f46eda2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,37 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" +) + +// systemRouter is a Router that provides information about +// the Docker system overall. It gathers information about +// host, daemon and container events. +type systemRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new systemRouter +func NewRouter(b Backend) router.Router { + r := &systemRouter{ + backend: b, + } + + r.routes = []router.Route{ + local.NewOptionsRoute("/", optionsHandler), + local.NewGetRoute("/_ping", pingHandler), + local.NewGetRoute("/events", r.getEvents), + local.NewGetRoute("/info", r.getInfo), + local.NewGetRoute("/version", r.getVersion), + local.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system.
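// The /events route registered above streams newline-separated JSON messages
// until the client disconnects or the "until" deadline fires (see getEvents
// below). A hedged sketch of a consumer; the daemon address, version prefix,
// and the trimmed eventMessage field set are all assumptions:
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type eventMessage struct {
	Status string `json:"status"`
	ID     string `json:"id"`
	From   string `json:"from"`
}

func main() {
	resp, err := http.Get("http://127.0.0.1:2375/v1.22/events")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body) // one JSON document per event
	for {
		var ev eventMessage
		if err := dec.Decode(&ev); err != nil {
			fmt.Println("stream ended:", err)
			return
		}
		fmt.Printf("%s %s (%s)\n", ev.Status, ev.ID, ev.From)
	}
}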
+func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 00000000..40ec748a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,132 @@ +package system + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + timetypes "github.com/docker/engine-api/types/time" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion.String() + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) + if err != nil { + return err + } + until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) + if err != nil { + return err + } + + timer := time.NewTimer(0) + timer.Stop() + if until > 0 || untilNano > 0 { + dur := time.Unix(until, untilNano).Sub(time.Now()) + timer = time.NewTimer(dur) + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + // This is to ensure that the HTTP status code is sent immediately, + // so that it will not block the receiver. 
+ w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timer.C: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, err := s.backend.AuthenticateToRegistry(config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 00000000..ede5dc4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,16 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/engine-api/types" +) + +// Backend is all the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, + opts map[string]string) (*types.Volume, error) + VolumeRm(name string) error +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 00000000..8bd5c129 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,38 @@ +package volume + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" +) + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volumeRouter +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + local.NewGetRoute("/volumes", r.getVolumesList), + local.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + local.NewPostRoute("/volumes/create", r.postVolumesCreate), + // DELETE + local.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go
b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 00000000..feef77cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,66 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req types.VolumeCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := v.backend.VolumeRm(vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go new file mode 100644 index 00000000..b5f1d06d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap replaces the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper implement the http.Handler interface.
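// A hedged usage sketch of the swap pattern around this type: requests keep
// flowing while the mux is replaced, because ServeHTTP (below) snapshots the
// current router under the lock. Simplified stand-ins, using gorilla/mux
// directly rather than the vendored server types:
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"

	"github.com/gorilla/mux"
)

type swapper struct {
	mu     sync.Mutex
	router *mux.Router
}

func (s *swapper) Swap(r *mux.Router) {
	s.mu.Lock()
	s.router = r
	s.mu.Unlock()
}

func (s *swapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	router := s.router // snapshot the current mux under the lock
	s.mu.Unlock()
	router.ServeHTTP(w, r)
}

func newMux(msg string) *mux.Router {
	m := mux.NewRouter()
	m.HandleFunc("/_ping", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, msg)
	})
	return m
}

func main() {
	s := &swapper{router: newMux("v1")}
	srv := httptest.NewServer(s)
	defer srv.Close()

	r1, _ := http.Get(srv.URL + "/_ping")
	s.Swap(newMux("v2")) // e.g. after toggling debug routes
	r2, _ := http.Get(srv.URL + "/_ping")
	fmt.Println(r1.StatusCode, r2.StatusCode) // both served, by different muxes
}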
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go new file mode 100644 index 00000000..aa7905b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go @@ -0,0 +1,243 @@ +package server + +import ( + "crypto/tls" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/local" + "github.com/docker/docker/api/server/router/network" + "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/sockets" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + AuthorizationPluginNames []string + Version string + SocketGroup string + TLSConfig *tls.Config + Addrs []Addr +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + authZPlugins []authorization.Plugin + routerSwapper *routerSwapper +} + +// Addr contains string representation of address and its protocol (tcp, unix...). +type Addr struct { + Proto string + Addr string +} + +// New returns a new instance of the server based on the specified configuration. +// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) (*Server, error) { + s := &Server{ + cfg: cfg, + } + for _, addr := range cfg.Addrs { + srv, err := s.newServer(addr.Proto, addr.Addr) + if err != nil { + return nil, err + } + logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr) + s.servers = append(s.servers, srv...) + } + return s, nil +} + +// Close closes servers and thus stops receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns a goroutine +// calling Serve for each. It also sets the routerSwapper as each server's Handler. +func (s *Server) serveAPI() error { + s.initRouterSwapper() + + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points.
+// l net.Listener, is a TCP or Socket listener that dispatches incoming requests to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close stops the HTTPServer from listening for inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") +} + +func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { + if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig); err != nil { + return nil, err + } + if err := allocateDaemonPort(addr); err != nil { + return nil, err + } + return +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the handler call + logrus.Debugf("Calling %s %s", r.Method, r.URL.Path) + + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.Background() + handlerFunc := s.handleWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err)) + httputils.WriteError(w, err) + } + } +} + +// InitRouters initializes a list of routers for the server. +func (s *Server) InitRouters(d *daemon.Daemon) { + s.addRouter(container.NewRouter(d)) + s.addRouter(local.NewRouter(d)) + s.addRouter(network.NewRouter(d)) + s.addRouter(system.NewRouter(d)) + s.addRouter(volume.NewRouter(d)) + s.addRouter(build.NewRouter(d)) +} + +// addRouter adds a new router to the server. +func (s *Server) addRouter(r router.Router) { + s.routers = append(s.routers, r) +} + +// createMux initializes the main router the server uses. +func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + if utils.IsDebugEnabled() { + profilerSetup(m, "/debug/") + } + + logrus.Debugf("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution.
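+// Wait is typically run in its own goroutine; callers select on waitChan to +// learn when the API server has stopped.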
+func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +func (s *Server) initRouterSwapper() { + s.routerSwapper = &routerSwapper{ + router: s.createMux(), + } +} + +// Reload reads configuration changes and modifies the +// server according to those changes. +// Currently, only the --debug configuration is taken into account. +func (s *Server) Reload(config *daemon.Config) { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + s.routerSwapper.Swap(s.createMux()) + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + s.routerSwapper.Swap(s.createMux()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go new file mode 100644 index 00000000..a4fc6395 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go @@ -0,0 +1,132 @@ +// +build freebsd linux + +package server + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" + "github.com/docker/libnetwork/portallocator" + + systemdActivation "github.com/coreos/go-systemd/activation" +) + +// newServer sets up the required HTTPServers and does protocol-specific checking. +// newServer does not set any muxers; the caller should later set the Handler field. +func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { + var ( + err error + ls []net.Listener + ) + switch proto { + case "fd": + ls, err = listenFD(addr, s.cfg.TLSConfig) + if err != nil { + return nil, err + } + case "tcp": + l, err := s.initTCPSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + var res []*HTTPServer + for _, l := range ls { + res = append(res, &HTTPServer{ + &http.Server{ + Addr: addr, + }, + l, + }) + } + return res, nil +} + +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given.
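+// systemd passes activated sockets starting at file descriptor 3, which is +// why the fd number supplied in addr is offset by 3 below.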
+func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = systemdActivation.TLSListeners(false, tlsConfig) + } else { + listeners, err = systemdActivation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("No sockets found") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("Too few socket activated files passed in") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go new file mode 100644 index 00000000..826dd2e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package server + +import ( + "errors" + "net" + "net/http" +) + +// NewServer sets up the required Server and does protocol specific checking. +func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { + var ( + ls []net.Listener + ) + switch proto { + case "tcp": + l, err := s.initTCPSocket(addr) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, errors.New("Invalid protocol format. Windows only supports tcp.") + } + + var res []*HTTPServer + for _, l := range ls { + res = append(res, &HTTPServer{ + &http.Server{ + Addr: addr, + }, + l, + }) + } + return res, nil + +} + +func allocateDaemonPort(addr string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE new file mode 100644 index 00000000..5b6e7c66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000..e67cdabd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 00000000..9b8c4de9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,567 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type data struct { + referenceCount int + path string +} + +// Driver contains information about the filesystem mounted. 
+// root of the filesystem +// sync.Mutex to protect against concurrent modifications +// active maps mount id to the count +type Driver struct { + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + sync.Mutex // Protects concurrent modification to active + active map[string]*data +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + for _, magic := range incompatibleFsMagic { + if fsMagic == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]*data), + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
+func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent, mountLabel string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + a.active[id] = &data{} + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + m := a.active[id] + if m != nil { + if m.referenceCount > 0 { + return nil + } + // Make sure the dir is umounted first + if err := a.unmount(m); err != nil { + return err + } + } + tmpDirs := []string{ + "mnt", + "diff", + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. + for _, p := range tmpDirs { + realPath := path.Join(a.rootPath(), p, id) + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpPath) + } + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the rootfs path for the id. 
+// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + ids, err := getParentIds(a.rootPath(), id) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + ids = []string{} + } + + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + m := a.active[id] + if m == nil { + m = &data{} + a.active[id] = m + } + + // If a dir does not have a parent (no layers), do not try to mount; + // just return the diff path to the data + m.path = path.Join(a.rootPath(), "diff", id) + if len(ids) > 0 { + m.path = path.Join(a.rootPath(), "mnt", id) + if m.referenceCount == 0 { + if err := a.mount(id, m, mountLabel); err != nil { + return "", err + } + } + } + m.referenceCount++ + return m.path, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + m := a.active[id] + if m == nil { + // but it might still be here + if a.Exists(id) { + path := path.Join(a.rootPath(), "mnt", id) + err := Unmount(path) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + } + return nil + } + if count := m.referenceCount; count > 1 { + m.referenceCount = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted if there were any parents + if ids != nil && len(ids) > 0 { + a.unmount(m) + } + delete(a.active, id) + } + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffPath returns path to the directory that contains files for the layer +// differences. Used for direct access for tar-split. +func (a *Driver) DiffPath(id string) (string, func() error, error) { + return path.Join(a.rootPath(), "diff", id), func() error { return nil }, nil +} + +func (a *Driver) applyDiff(id string, diff archive.Reader) error { + dir := path.Join(a.rootPath(), "diff", id) + if err := chrootarchive.UntarUncompressed(diff, dir, &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }); err != nil { + return err + } + + // show invalid whiteouts warning. + files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir)) + if err == nil && len(files) > 0 { + logrus.Warnf("Archive contains aufs hardlink references that are not supported.") + } + return nil +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // AUFS doesn't need the parent id to apply the diff.
+ if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, m *data, mountLabel string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(m); err != nil || mounted { + return err + } + + var ( + target = m.path + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(m *data) error { + if mounted, err := a.mounted(m); err != nil || !mounted { + return err + } + return Unmount(m.path) +} + +func (a *Driver) mounted(m *data) (bool, error) { + return mountpk.Mounted(m.path) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + for id, m := range a.active { + if err := a.unmount(m); err != nil { + logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size (4096 bytes). If there are more + // layers than fit, they are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + } + + if firstMount { + opts := "dio,noplink,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } + } + + return +} + +// useDirperm checks whether the dirperm1 mount option can be used with the +// current version of aufs.
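+// The probe mounts a throwaway aufs union once and caches the result via +// sync.Once, since the check is comparatively expensive.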
+func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 00000000..08f1ffc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,48 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return the ids of all layers under the given root, which are stored as +// regular files +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by newlines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go new file mode 100644 index 00000000..d7e9bf9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified.
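+// "auplink flush" is run first so that pending aufs pseudo-link operations +// are flushed before the unmount; a failure there is logged but not fatal.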
+func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Errorf("Couldn't run auplink before unmount: %s", err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 00000000..8062bae4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 00000000..d030b066 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount is declared so this package compiles on non-linux systems, where +// mounting is not supported. +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 00000000..86aa631b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,322 @@ +// +build linux + +package btrfs + +/* +#include <stdlib.h> +#include <dirent.h> +#include <btrfs/ioctl.h> +#include <btrfs/ctree.h> +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + // root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two-dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of 
parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// Create the filesystem with given id. +func (d *Driver) Create(id, parent, mountLabel string) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent, "") + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. 
+func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 00000000..f0708888 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go new file mode 100644 index 00000000..73d90cdd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include <btrfs/version.h> + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 00000000..f802fbc6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md new file mode 100644 index 00000000..fa9a501c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,97 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Docker daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +In loopback, a thin pool is created at `/var/lib/docker/devicemapper` +(devicemapper graph location) based on two block devices, one for +data and one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are +`/var/lib/docker/devicemapper/devicemapper/data` and +`/var/lib/docker/devicemapper/devicemapper/metadata`.
Additional metadata +required to map from docker entities to the corresponding devicemapper +volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` +file (encoded as JSON). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the minor/major device number and `19478248` is the +inode number of the `/var/lib/docker/devicemapper` directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base`, of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### Information on `docker info` + +As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver +will display something like: + + $ sudo docker info + [...] + Storage Driver: devicemapper + Pool Name: docker-253:1-17538953-pool + Pool Blocksize: 65.54 kB + Base Device Size: 107.4 GB + Data file: /dev/loop4 + Metadata file: /dev/loop4 + Data Space Used: 2.536 GB + Data Space Total: 107.4 GB + Data Space Available: 104.8 GB + Metadata Space Used: 7.93 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.14 GB + Udev Sync Supported: true + Data loop file: /home/docker/devicemapper/devicemapper/data + Metadata loop file: /home/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.82-git (2013-10-04) + [...] + +#### status items + +Each item in the indented section under `Storage Driver: devicemapper` is +status information about the driver. + * `Pool Name` name of the devicemapper pool for this driver. + * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. + * `Base Device Size` tells the maximum size of a container and image. + * `Data file` blockdevice file used for the devicemapper data + * `Metadata file` blockdevice file used for the devicemapper metadata + * `Data Space Used` tells how much of `Data file` is currently used + * `Data Space Total` tells the maximum size of the `Data file` + * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Metadata Space Used` tells how much of `Metadata file` is currently used + * `Metadata Space Total` tells the maximum size of the `Metadata file` + * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. + * `Data loop file` file attached to `Data file`, if a loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if a loopback device is used + * `Library Version` the version of the libdevmapper library used + +### About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +These use the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`.
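+ +For example, a daemon pointed at a pre-created thin pool with deferred +removal enabled might be started like this (the pool name below is +illustrative; substitute the thin pool you actually created): + + $ sudo docker daemon --storage-driver devicemapper --storage-opt dm.thinpooldev=/dev/mapper/docker-thinpool --storage-opt dm.use_deferred_removal=true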
+ +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/reference/commandline/daemon/#docker-execdriver-option). If you add an option, update both the `man` page and the +documentation. diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 00000000..d8522349 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2530 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many times that even error messages + // would fill up the console during normal operation. So only log Fatal + // messages by default. + logLevel = devicemapper.LogLevelFatal + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + mountCount int + mountPath string + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks, all + // device locks must be acquired *before* the global DeviceSet lock, and + // multiple device locks should be acquired parent before child.
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about a list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status holds status information about the device set. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback is the loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for metadata. + Metadata DiskUsage + // BaseDeviceSize is the base size of containers and images. + BaseDeviceSize uint64 + // BaseDeviceFS is the backing filesystem. + BaseDeviceFS string + // SectorSize is the size of a sector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true if deferred removal is enabled; the device is then deactivated lazily rather than unmounted immediately. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus holds information about a mounted device: its ID, size, and sector usage. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system; this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the device, in sectors.
+ SizeInSectors uint64 + // MappedSectors indicates number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the highest mapped sector. + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of <size> bytes at the path +// <root>/devicemapper/<name>. +// If the file already exists and the new size is larger than its current size, it grows to the new size. +// Either way it returns the full path.
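+// Shrinking is never attempted: if the file is already larger than the
+// requested size, only a warning is logged and the file is left as is.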
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
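+ // mask now has every bit set except the one for deviceID%8, so the
+ // AND below clears just this device's bit while leaving the other
+ // seven device IDs tracked by this byte untouched.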
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on the device hash map having been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debugf("devmapper: constructDeviceIDMap()") + defer logrus.Debugf("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debugf("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held.
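+// registerDevice adds the device to the in-memory map and persists its
+// metadata file; if the write fails, the in-memory entry is removed again so
+// the two views stay consistent.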
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemoval(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH.
Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
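+ // Otherwise, the ticker below drives periodic retries of devices whose
+ // deletion previously failed with EBUSY (see cleanupDeletedDevices).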
+ if !devices.deferredDelete { + return + } + + logrus.Debugf("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. + logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, baseInfo.Size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil 
+} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debugf("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. 
+ if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save the UUID for the first time +// (upgrade case) or verify the UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is empty (upgrade case), save it and return success. + if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same filesystem UUID + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow
rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debugf("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. + if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. 
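+// Only growing is supported: the backing loopback file is truncated to the
+// larger size, the loop device capacity is refreshed, and the pool table is
+// reloaded under a suspend/resume cycle.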
+func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. 
+ if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was an open transaction but the pool transaction ID is the same + // as the open transaction ID, there is nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // The pool transaction ID is not the same as the open transaction ID, so + // there is a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. + if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debugf("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support the deferred removal + * feature.
+ */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. +func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. 
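+ // The loop device is identified by comparing the major:minor numbers
+ // reported in the pool table against those of the loop device backing
+ // the file.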
+ if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return graphdriver.ErrNotSupported + } + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. + if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debugf("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debugf("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + logrus.Warn("devmapper: Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/daemon/#daemon-storage-driver-option") + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
+ // docker-<maj>,<min>[-<inode>] stands for: + // - Managed by docker + // - The target of this device is at major <maj> and minor <min> + // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this bad hack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in <root>/devicemapper/data and + // <root>/devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debugf("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices.
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warnf("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers it in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // Return the error if syncDelete is true, if deferred deletion + // is not enabled, or if the error is something other than EBUSY.
+ if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { + return err + } + // If the device was already in the deferred-delete state, this + // deletion is a retry of an earlier attempt. Reduce the deleted + // device count. + if info.Deleted { + devices.nrDeletedDevices-- + } + devices.markDeviceIDFree(info.DeviceID) + } else { + if err := devices.markForDeferredDeletion(info); err != nil { + return err + } + } + + return nil +} + +// Issue discard only if the device open count is zero. +func (devices *DeviceSet) issueDiscard(info *devInfo) error { + logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash) + // This is a workaround for the kernel not discarding blocks on + // the thin pool when we remove a thinp device, so we do it + // manually. + // Even if the device is deferred deleted, activate it and issue + // discards. + if err := devices.activateDeviceIfNeeded(info, true); err != nil { + return err + } + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.OpenCount != 0 { + logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) + return nil + } + + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { + logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + } + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { + if devices.doBlkDiscard { + devices.issueDiscard(info) + } + + // Try to deactivate the device in case it is active. + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Error deactivating device: %s", err) + return err + } + + if err := devices.deleteTransaction(info, syncDelete); err != nil { + return err + } + + return nil +} + +// DeleteDevice will return success if the device has been marked for deferred +// deletion. To override that and have DeleteDevice() fail if the device was +// busy and could not be deleted, set syncDelete=true. +func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // If mountCount is not zero, the device is still in use + // or has not been Put() properly. Fail device deletion. + + if info.mountCount != 0 { + return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. 
mntCount=%v", info.Hash, info.mountCount) + } + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debugf("devmapper: deactivatePool()") + defer logrus.Debugf("devmapper: deactivatePool END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } + } else { + if err := devices.removeDevice(info.Name()); err != nil { + return err + } + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err == nil { + break + } + + if err == devicemapper.ErrEnxio { + // Device is probably already gone. Return success. + return nil + } + + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + return err +} + +// Shutdown shuts down the device by unmounting the root. +func (devices *DeviceSet) Shutdown() error { + logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) + logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) + defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + + var devs []*devInfo + + // Stop deletion worker. This should start delivering new events to + // ticker channel. That means no new instance of cleanupDeletedDevice() + // will run after this call. If one instance is already running at + // the time of the call, it must be holding devices.Lock() and + // we will block on this lock till cleanup function exits. + devices.deletionWorkerTicker.Stop() + + devices.Lock() + // Save DeviceSet Metadata first. 
Docker kills all threads if they + // don't finish within a certain time. It is possible that the Shutdown() + // routine does not finish in time, as we loop trying to deactivate + // devices that are still busy. In that case the Shutdown() routine + // will be killed and we will not get a chance to save the deviceset + // metadata. Hence save it early, before trying to deactivate devices. + devices.saveDeviceSetMetaData() + + for _, info := range devices.Devices { + devs = append(devs, info) + } + devices.Unlock() + + for _, info := range devs { + info.lock.Lock() + if info.mountCount > 0 { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { + logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err) + } + + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err) + } + devices.Unlock() + } + info.lock.Unlock() + } + + info, _ := devices.lookupDeviceWithLock("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + } + } + devices.Unlock() + + return nil +} + +// MountDevice mounts the device if not already mounted. +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) + } + + info.mountCount++ + return nil + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs UUID + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + info.mountCount = 1 + info.mountPath = path + + return nil +} + +// UnmountDevice unmounts the device and removes it from the hash. 
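+// Illustrative usage sketch (not from the vendored source): mounts are
+// reference counted, so callers pair MountDevice/UnmountDevice calls, e.g.
+// (error handling elided, names as above):
+//
+//	devices.MountDevice(hash, mntPath, mountLabel) // mountCount: 0 -> 1, mounts
+//	devices.MountDevice(hash, mntPath, mountLabel) // mountCount: 1 -> 2, no-op
+//	devices.UnmountDevice(hash, mntPath)           // mountCount: 2 -> 1, stays mounted
+//	devices.UnmountDevice(hash, mntPath)           // mountCount: 1 -> 0, unmounts and deactivates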
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // If there are running containers when the daemon crashes, then on + // restart the daemon kills those containers and finally calls Put() + // without a matching Get(), so info.mountCount may become negative. + // If info.mountCount goes negative, we do the unmount and reset + // it to 0. + + info.mountCount-- + if info.mountCount > 0 { + return nil + } else if info.mountCount < 0 { + logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0") + info.mountCount = 0 + } + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debugf("devmapper: Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + info.mountPath = "" + + return nil +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors and the highest mapped sector of the device. +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of whether it is a loopback file or a block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice 
+} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of whether it is a loopback file or a block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + } + + return status +} + +// exportDeviceMetadata returns the device metadata (id, size and name) for the device with the given hash. +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. 
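+// Illustrative usage sketch (not from the vendored source): the option keys
+// are the dm.* keys parsed below; the root path and values are made up:
+//
+//	ds, err := NewDeviceSet("/var/lib/docker/devicemapper", true,
+//		[]string{"dm.fs=xfs", "dm.thinpooldev=/dev/mapper/docker-thinpool"},
+//		nil, nil)
+//	if err != nil {
+//		// handle initialization failure
+//	}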
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("devmapper: Unknown option %s", key) + } + } + + // By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 00000000..9ab3e4f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards 
compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 00000000..c03a7730 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,204 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// Init creates a driver with the given home and the set of options. 
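+// Illustrative note (not from the vendored source): since init() above
+// registers this Init under the name "devicemapper", callers normally reach
+// it through the graphdriver registry rather than calling it directly, e.g.:
+//
+//	d, err := graphdriver.GetDriver("devicemapper", "/var/lib/docker",
+//		[]string{"dm.fs=xfs"}, nil, nil)
+//
+// (the home path and option value are examples only).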
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent, mountLabel string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. 
+func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly paired with Get/Put calls and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + return "", err + } + + rootFs := path.Join(mp, "rootfs") + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device once it is no longer used. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 00000000..cca1fe1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device. 
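+// Illustrative example (not from the vendored source): the probe reads just
+// enough of the device to cover every known magic offset and compares bytes,
+// so for an ext4-formatted device one would expect
+//
+//	fstype, err := ProbeFsType("/dev/dm-3") // hypothetical device path
+//	// fstype == "ext4" when bytes 0x438-0x439 are 0x53 0xEF ("\123\357" below)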
+func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 00000000..d9ab839c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,218 @@ +package graphdriver + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// FsMagic is the unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported is returned when the driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites is returned when the driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS is returned when the file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which chooses not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // Create creates a new, empty, filesystem layer with the + // specified id and parent and mountLabel. Parent and mountLabel may be "". + Create(id, parent, mountLabel string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g., unmounting layered filesystem. 
+ Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container the driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + if pluginDriver, err := lookupPlugin(name, home, options); err == nil { + return pluginDriver, nil + } + logrus.Errorf("Failed to GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// New creates the driver and initializes it at the specified root. +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, root, options, uidMaps, gidMaps) + } + + // Guess for prior driver + priorDrivers := scanPriorDrivers(root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. 
+ continue + } + for _, prior := range priorDrivers { + // of the state found from prior drivers, check in order of our priority + // which one we would prefer + if prior == name { + driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + return nil, err + } + if err := checkPriorDriver(name, root); err != nil { + return nil, err + } + logrus.Infof("[graphdriver] using prior storage driver %q", name) + return driver, nil + } + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for _, initFunc := range drivers { + if driver, err = initFunc(root, options, uidMaps, gidMaps); err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// scanPriorDrivers returns an unordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) []string { + priorDrivers := []string{} + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + priorDrivers = append(priorDrivers, driver) + } + } + return priorDrivers +} + +func checkPriorDriver(name, root string) error { + priorDrivers := []string{} + for _, prior := range scanPriorDrivers(root) { + if prior != name && prior != "vfs" { + if _, err := os.Stat(filepath.Join(root, prior)); err == nil { + priorDrivers = append(priorDrivers, prior) + } + } + } + + if len(priorDrivers) > 0 { + return fmt.Errorf("%q contains other graphdrivers: %s; Please clean up or explicitly choose a storage driver (-s <DRIVER>)", root, strings.Join(priorDrivers, ",")) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 00000000..be4eb526 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,8 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "zfs", + } +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 00000000..410a62ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,85 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicExtfs filesystem id for Extfs + 
FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicJffs2Fs filesystem id for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "devicemapper", + "overlay", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 00000000..b3f68573 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd + +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 00000000..6c09affa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,16 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + "windowsdiff", + "vfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. 
+ return FsMagicUnsupported, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 00000000..5a349325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,162 @@ +package graphdriver + +import ( + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
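+// Illustrative usage sketch (not from the vendored source), from a caller's
+// perspective: a backend that only implements ProtoDriver gains Diff, Changes,
+// ApplyDiff and DiffSize by wrapping, which is what the devmapper Init does:
+//
+//	var proto graphdriver.ProtoDriver = newMyProtoDriver() // hypothetical backend
+//	drv := graphdriver.NewNaiveDiffDriver(proto, uidMaps, gidMaps)
+//	changes, err := drv.Changes(layerID, parentID)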
+func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debugf("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go new file mode 100644 index 00000000..534f2e58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -0,0 +1,299 @@ +// +build linux freebsd + +package graphtest + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +var ( + drv *Driver +) + +// Driver conforms to graphdriver.Driver interface and +// contains information such as root and reference count of the number of clients using it. +// This helps in testing drivers added into the framework. +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root, nil, nil, nil) + if err != nil { + t.Logf("graphdriver: %v\n", err) + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +// GetDriver create a new driver with given name or return a existing driver with the name updating the reference count. +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +// PutDriver removes the driver if it is no longer used and updates the reference count. +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a { + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} + +// DriverTestCreateEmpty creates an new image and verifies it is empty and the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", "", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + 
t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, "", ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +// DriverTestCreateBase creates a base driver and verifies it. +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +// DriverTestCreateSnap creates a driver and a snap and verifies them. 
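+// Illustrative usage sketch (not from the vendored source): a driver's test
+// package typically delegates to these helpers; "mydriver" is a hypothetical
+// registered driver name:
+//
+//	func TestMyDriverCreateSnap(t *testing.T) {
+//		graphtest.DriverTestCreateSnap(t, "mydriver")
+//	}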
+func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base", ""); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 00000000..a50c5211 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go new file mode 100644 index 00000000..def4d439 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,169 @@ +// +build linux + +package overlay + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&copyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. 
+ if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 00000000..59131bba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,476 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. 
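+// A custom ApplyDiff can decline the fast path by returning ErrApplyDiffFallback; a +// minimal hypothetical implementation (myDriver is illustrative, not part of this package): +// +//	func (d *myDriver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { +//		return 0, ErrApplyDiffFallback // always fall back to the naive diff path +//	}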
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is an "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases: either the +// parent has a "root" dir, in which case we start out with an empty "upper" +// directory overlaid on the parent's root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy of the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// ActiveMount contains information about the count, path and whether it is mounted or not. +// This information is part of the Driver, which contains the list of active mounts that are part of this overlay. +type ActiveMount struct { + count int + path string + mounted bool +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + sync.Mutex // Protects concurrent modification to active + active map[string]*ActiveMount + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +var backingFs = "" + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem. +// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned. +// If overlay is not supported over the existing backing filesystem, graphdriver.ErrIncompatibleFS is returned.
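+// A minimal initialization sketch (the home path and nil ID maps are assumptions; in +// the daemon this is normally reached through graphdriver.GetDriver): +// +//	driver, err := Init("/var/lib/docker/overlay", nil, nil, nil) +//	if err == graphdriver.ErrNotSupported { +//		// the running kernel has no usable overlay support +//	}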
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if the home directory sits on an incompatible backing filesystem (btrfs, aufs or zfs) + switch fsMagic { + case graphdriver.FsMagicBtrfs: + logrus.Error("'overlay' is not supported over btrfs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicAufs: + logrus.Error("'overlay' is not supported over aufs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + logrus.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + active: make(map[string]*ActiveMount), + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns metadata about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergedDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup simply returns nil and does not change the existing filesystem. +// This is required to satisfy the graphdriver.Driver interface. +func (d *Driver) Cleanup() error { + return nil +} + +// Create creates the upper, work, and merged directories (and the lower-id file) required for overlayfs for a given id. +// The parent filesystem is used to configure these directories for the overlay.
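+// For reference, a child layer created from an image parent ends up on disk roughly as +// (the home prefix is whatever was passed to Init): +// +//	<home>/<id>/lower-id   id of the parent whose "root" is the lower layer +//	<home>/<id>/upper      writable upper layer +//	<home>/<id>/work       overlayfs work directory +//	<home>/<id>/merged     mount point for the assembled overlay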
+func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay over it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path.
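+// Get and Put are reference counted and must be balanced; a hypothetical caller +// (the id is illustrative): +// +//	dir, err := d.Get("abc123", "") +//	if err == nil { +//		defer d.Put("abc123") +//		// read and write files under dir +//	}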
+func (d *Driver) Get(id string, mountLabel string) (string, error) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount != nil { + mount.count++ + return mount.path, nil + } + + mount = &ActiveMount{count: 1} + + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + mount.path = rootDir + d.active[id] = mount + return mount.path, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerID)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + mount.path = mergedDir + mount.mounted = true + d.active[id] = mount + + return mount.path, nil +} + +// Put unmounts the mount path created for the given id. +func (d *Driver) Put(id string) error { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount == nil { + logrus.Debugf("Put on a non-mounted device %s", id) + // but it might be still here + if d.Exists(id) { + mergedDir := path.Join(d.dir(id), "merged") + err := syscall.Unmount(mergedDir, 0) + if err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + } + return nil + } + + mount.count-- + if mount.count > 0 { + return nil + } + + defer delete(d.active, id) + if mount.mounted { + err := syscall.Unmount(mount.path, 0) + if err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err + } + return nil +} + +// ApplyDiff applies the new layer on top of the root; if the parent does not exist it will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer.
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 00000000..d63161b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,32 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + "io" + + "github.com/docker/docker/pkg/plugins" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + pl, err := plugins.Get(name, "GraphDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, home, opts, pl.Client) +} + +func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { + proxy := &graphDriverProxy{name, c} + return proxy, proxy.Init(home, opts) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go new file mode 100644 index 00000000..daa7a170 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go @@ -0,0 +1,7 @@ +// +build !experimental + +package graphdriver + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + return nil, ErrNotSupported +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/proxy.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 00000000..47128473 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,210 @@ +// +build experimental + +package graphdriver + +import ( + "errors" + "fmt" + + "github.com/docker/docker/pkg/archive" +) + +type graphDriverProxy struct { + name string + client pluginClient +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string +} + +func (d *graphDriverProxy) Init(home string, opts []string) error { + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Create(id, parent, mountLabel string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret 
graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return ret.Dir, err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + body.Close() + return nil, err + } + return archive.Archive(body), nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 00000000..262954d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 00000000..f456cc5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 00000000..bb2e9ef5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 00000000..3a952642 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 00000000..98fad23b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go new file mode 100644 index 00000000..efaa5005 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 00000000..8c31c415 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..00d9f8ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ 
-0,0 +1,135 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent, mountLabel string) error { + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. 
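+// Note that vfs performs no mounting: a hypothetical Get("abc123", "") simply returns +// the plain directory <home>/dir/abc123 populated by Create.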
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go new file mode 100644 index 00000000..05942252 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go @@ -0,0 +1,670 @@ +//+build windows + +package windows + +import ( + "crypto/sha512" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/random" + "github.com/microsoft/hcsshim" +) + +// init registers the windows graph drivers with the graphdriver registry. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + graphdriver.Register("windowsdiff", InitDiff) +} + +const ( + // diffDriver is an hcsshim driver type + diffDriver = iota + // filterDriver is an hcsshim driver type + filterDriver +) + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + // Mutex protects concurrent modification to active + sync.Mutex + // active stores references to the activated layers + active map[string]int +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + active: make(map[string]int), + } + return d, nil +} + +// InitDiff returns a new Windows differencing disk driver. +func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitDiff at %s", home) + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: diffDriver, + }, + active: make(map[string]int), + } + return d, nil +} + +// String returns the string representation of a driver. +func (d *Driver) String() string { + switch d.info.Flavour { + case diffDriver: + return "windowsdiff" + case filterDriver: + return "windowsfilter" + default: + return "Unknown driver flavour" + } +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with this driver.
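+// Ids are first resolved through the optional layerID file (see resolveID below), so +// Exists reports on the resolved layer rather than the raw id.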
+func (d *Driver) Exists(id string) bool { + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// Create creates a new layer with the given id. +func (d *Driver) Create(id, parent, mountLabel string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + parentIsInit := strings.HasSuffix(rPId, "-init") + + if !parentIsInit && rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + layerChain = []string{parentPath} + } + + layerChain = append(layerChain, parentChain...) + + if parentIsInit { + if len(layerChain) == 0 { + return fmt.Errorf("Cannot create a read/write layer without a parent layer.") + } + if err := hcsshim.CreateSandboxLayer(d.info, id, layerChain[0], layerChain); err != nil { + return err + } + } else { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + rID, err := d.resolveID(id) + if err != nil { + return err + } + os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail + return hcsshim.DestroyLayer(d.info, rID) +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id, mountLabel string) (string, error) { + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + d.Lock() + defer d.Unlock() + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + return "", err + } + + if d.active[rID] == 0 { + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err2) + } + return "", err + } + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err2) + } + return "", err + } + + d.active[rID]++ + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put releases the reference on the layer with the given id, deactivating it once the last reference is dropped.
+func (d *Driver) Put(id string) error { + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + + d.Lock() + defer d.Unlock() + + if d.active[rID] > 1 { + d.active[rID]-- + } else if d.active[rID] == 1 { + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { + return err + } + delete(d.active, rID) + } + + return nil +} + +// Cleanup ensures the information the driver stores is properly removed. +func (d *Driver) Cleanup() error { + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (arch archive.Archive, err error) { + rID, err := d.resolveID(id) + if err != nil { + return + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + d.Lock() + + // To support export, a layer must be activated but not prepared. + if d.info.Flavour == filterDriver { + if d.active[rID] == 0 { + if err = hcsshim.ActivateLayer(d.info, rID); err != nil { + d.Unlock() + return + } + defer func() { + if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + }() + } else { + if err = hcsshim.UnprepareLayer(d.info, rID); err != nil { + d.Unlock() + return + } + defer func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err) + } + }() + } + } + + d.Unlock() + + return d.exportLayer(rID, layerChain) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + return nil, fmt.Errorf("The Windows graphdriver does not support Changes()") +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + if d.info.Flavour == diffDriver { + start := time.Now().UTC() + logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer") + destination := d.dir(id) + destination = filepath.Dir(destination) + if size, err = chrootarchive.ApplyUncompressedLayer(destination, diff, nil); err != nil { + return + } + logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return + } + layerChain := []string{parentPath} + layerChain = append(layerChain, parentChain...) + + if size, err = d.importLayer(id, diff, layerChain); err != nil { + return + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return + } + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// CustomImageInfo is the object returned by the driver describing the base +// image. +type CustomImageInfo struct { + ID string + Name string + Version string + Path string + Size int64 + CreatedTime time.Time +} + +// GetCustomImageInfos returns the image infos for window specific +// base images which should always be present. +func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) { + strData, err := hcsshim.GetSharedBaseImages() + if err != nil { + return nil, fmt.Errorf("Failed to restore base images: %s", err) + } + + type customImageInfoList struct { + Images []CustomImageInfo + } + + var infoData customImageInfoList + + if err = json.Unmarshal([]byte(strData), &infoData); err != nil { + err = fmt.Errorf("JSON unmarshal returned error=%s", err) + logrus.Error(err) + return nil, err + } + + var images []CustomImageInfo + + for _, imageData := range infoData.Images { + folderName := filepath.Base(imageData.Path) + + // Use crypto hash of the foldername to generate a docker style id. + h := sha512.Sum384([]byte(folderName)) + id := fmt.Sprintf("%x", h[:32]) + + if err := d.Create(id, "", ""); err != nil { + return nil, err + } + // Create the alternate ID file. + if err := d.setID(id, folderName); err != nil { + return nil, err + } + + imageData.ID = id + images = append(images, imageData) + } + + return images, nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (arch archive.Archive, err error) { + layerFolder := d.dir(id) + + tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) + if err = os.MkdirAll(tempFolder, 0755); err != nil { + logrus.Errorf("Could not create %s %s", tempFolder, err) + return + } + defer func() { + if err != nil { + _, folderName := filepath.Split(tempFolder) + if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + } + }() + + if err = hcsshim.ExportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { + return + } + + archive, err := archive.Tar(tempFolder, archive.Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + d.Put(id) + _, folderName := filepath.Split(tempFolder) + if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + return err + }), nil + +} + +// importLayer adds a new layer to the tag and graph store based on the given data. 
+func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { + layerFolder := d.dir(id) + + tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) + if err = os.MkdirAll(tempFolder, 0755); err != nil { + logrus.Errorf("Could not create %s %s", tempFolder, err) + return + } + defer func() { + _, folderName := filepath.Split(tempFolder) + if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + }() + + start := time.Now().UTC() + logrus.Debugf("Start untar layer") + if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil { + return + } + err = copySysFiles(tempFolder, filepath.Join(d.info.HomeDir, "sysfile-backups", id)) + if err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { + return + } + + return +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId in disk. +func (d *Driver) setID(id, altID string) error { + err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + if err != nil { + return err + } + return nil +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information in disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +// DiffPath returns a directory that contains files needed to construct layer diff. +func (d *Driver) DiffPath(id string) (path string, release func() error, err error) { + id, err = d.resolveID(id) + if err != nil { + return + } + + // Getting the layer paths must be done outside of the lock. 
+ layerChain, err := d.getLayerChain(id) + if err != nil { + return + } + + layerFolder := d.dir(id) + tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) + if err = os.MkdirAll(tempFolder, 0755); err != nil { + logrus.Errorf("Could not create %s %s", tempFolder, err) + return + } + + defer func() { + if err != nil { + _, folderName := filepath.Split(tempFolder) + if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + } + }() + + if err = hcsshim.ExportLayer(d.info, id, tempFolder, layerChain); err != nil { + return + } + + err = copySysFiles(filepath.Join(d.info.HomeDir, "sysfile-backups", id), tempFolder) + if err != nil { + return + } + + return tempFolder, func() error { + // TODO: activate layers and release here? + _, folderName := filepath.Split(tempFolder) + return hcsshim.DestroyLayer(d.info, folderName) + }, nil +} + +var sysFileWhiteList = []string{ + "Hives\\*", + "Files\\BOOTNXT", + "tombstones.txt", +} + +// note this only handles files +func copySysFiles(src string, dest string) error { + if err := os.MkdirAll(dest, 0700); err != nil { + return err + } + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + for _, sysfile := range sysFileWhiteList { + if matches, err := filepath.Match(sysfile, rel); err != nil || !matches { + continue + } + + fi, err := os.Lstat(path) + if err != nil { + return err + } + + if !fi.Mode().IsRegular() { + continue + } + + targetPath := filepath.Join(dest, rel) + if err = os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { + return err + } + + in, err := os.Open(path) + if err != nil { + return err + } + out, err := os.Create(targetPath) + if err != nil { + in.Close() + return err + } + _, err = io.Copy(out, in) + in.Close() + out.Close() + if err != nil { + return err + } + } + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 00000000..9c270c54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 00000000..e0967983 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,333 @@ +// +build linux freebsd + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. 
+// It takes the base mount path and an array of options which are represented as key-value pairs. +// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options. +func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem.
It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to the graph driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. +func (d *Driver) Create(id string, parent string, mountLabel string) error { + err := d.create(id, parent) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent) +} + +func (d *Driver) create(id, parent string) error { + name := d.zfsPath(id) + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + return err + } + return d.cloneFilesystem(name, d.zfsPath(parent)) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary.
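+// The mount performed below is roughly equivalent to this illustrative shell command +// (the dataset and id are assumptions): +// +//	mount -t zfs -o context="..." <fsname>/<id> <mountPath>/graph/<id>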
+// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + return "", err + } + + err = mount.Mount(filesystem, mountpoint, "zfs", options) + if err != nil { + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + return d.filesystemsCache[d.zfsPath(id)] +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 00000000..1c05fa79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ...
] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 00000000..52ed5160 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 00000000..643b169b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 00000000..a1f02a62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 00000000..beec19a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 00000000..b107de32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + 
"header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/metadata.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 00000000..9f744d46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,77 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + tempFilePath := path + ".tmp" + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil { + return err + } + return os.Rename(tempFilePath, path) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. 
+// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory, is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v1_id_service.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 00000000..f6e45892 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,44 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates a layer DiffID with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 00000000..239cd1f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,137 @@ +package metadata + +import ( + "encoding/json" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService struct { + store Store +} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) *V2MetadataService { + return &V2MetadataService{ + store: store, + } +} + +func (serv *V2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *V2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} +
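Concretely, the two namespaces above give every record a predictable path under the store root. A sketch of the resulting keys; the store root is illustrative, and the "algorithm:hex" split approximates what the digest package's Algorithm()/Hex() methods do:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	diffID := "sha256:deadbeef" // illustrative digest

	// diffIDKey/digestKey turn "algorithm:hex" into "algorithm/hex".
	parts := strings.SplitN(diffID, ":", 2)
	key := parts[0] + "/" + parts[1]

	root := "/var/lib/docker/distribution" // hypothetical store basePath
	fmt.Println(filepath.Join(root, "v2metadata-by-diffid", key))
	fmt.Println(filepath.Join(root, "diffid-by-digest", key))
}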
+// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// Remove disassociates a metadata entry from a layer DiffID. +func (serv *V2MetadataService) Remove(metadata V2Metadata) error { + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +}
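Add above keeps at most maxMetadata (50) entries per DiffID: any existing duplicate of the new entry is removed first, then the slice is trimmed from the front so the oldest entries are dropped. The same slice handling in isolation, with a smaller cap for illustration:

package main

import "fmt"

type v2Metadata struct{ Digest, SourceRepository string }

const maxMetadata = 3 // the vendored code uses 50

// addBounded mirrors the slice handling in (*V2MetadataService).Add:
// remove any existing copy, append the new entry, then trim from the front.
func addBounded(old []v2Metadata, m v2Metadata) []v2Metadata {
	out := make([]v2Metadata, 0, len(old)+1)
	for _, o := range old {
		if o != m {
			out = append(out, o)
		}
	}
	out = append(out, m)
	if len(out) > maxMetadata {
		out = out[len(out)-maxMetadata:]
	}
	return out
}

func main() {
	var ms []v2Metadata
	for _, repo := range []string{"a", "b", "c", "d"} {
		ms = addBounded(ms, v2Metadata{Digest: "sha256:x", SourceRepository: repo})
	}
	fmt.Println(ms) // the oldest entry ("a") has been dropped
}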
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go new file mode 100644 index 00000000..ab8c14ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,203 @@ +package distribution + +import ( + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + // MetaHeaders stores HTTP headers with metadata about the image + // (DockerHeaders with prefix X-Meta- in the request). + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger logs events for a given image. + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // DownloadManager manages concurrent pulls. + DownloadManager *xfer.LayerDownloadManager +} + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `ref`. It returns an error + // if any; a fallbackError signals that Pull may be retried against the + // next configured endpoint. + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} +
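Both pullers report retryable failures through a fallbackError so that the loop in Pull below can move on to the next endpoint. The type is defined elsewhere in this vendored package; a minimal equivalent with the two fields the code above reads (err and confirmedV2), for illustration only:

package main

import (
	"errors"
	"fmt"
)

// fallbackError sketches the shape this package relies on: it wraps the
// underlying error and records whether a v2 registry was confirmed.
type fallbackError struct {
	err         error
	confirmedV2 bool
}

func (f fallbackError) Error() string { return f.err.Error() }

func main() {
	var err error = fallbackError{err: errors.New("manifest unknown"), confirmedV2: true}
	// Callers unwrap via a type assertion, as in Pull below.
	if fe, ok := err.(fallbackError); ok {
		fmt.Println("retry next endpoint; confirmedV2 =", fe.confirmedV2)
	}
}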
+// Pull initiates a pull operation. ref names the image to pull; it may +// identify a repository only, or carry a specific tag or digest. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := validateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport. + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the one from the v2 endpoints, not the v1 ones. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + ) + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(registry.ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // record this error; only the most recent one is kept + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // record this error; only the most recent one is kept + lastErr = err + } + continue + } + logrus.Debugf("Not continuing with error: %v", err) + return err + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return lastErr +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// validateRepoName validates the name of a repository. +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +// tmpFileCloser creates a closer function for a temporary file that closes the file +// and also deletes it.
+func tmpFileCloser(tmpFile *os.File) func() error { + return func() error { + tmpFile.Close() + if err := os.RemoveAll(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + + return nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v1.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 00000000..25e63343 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,335 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: registry.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debugf("Retrieving the tag list") + var tagsList map[string]string + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = &registry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned; it would be better to concatenate the errors. + // As the error is also written to the output stream, the user will see it.
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return err + } + + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx 
context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + tmpFile, err := ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(tmpFile, reader) + if err != nil { + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + tmpFile.Seek(0, 0) + return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 00000000..7bb17100 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,701 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + +type v2Puller struct { + V2MetadataService *metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. 
+ confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return fallbackError{err: err, confirmedV2: p.confirmedV2} + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if registry.ContinueOnError(err) { + logrus.Debugf("Error trying v2 registry: %v", err) + return fallbackError{err: err, confirmedV2: p.confirmedV2} + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? + layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService *metadata.V2MetadataService +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + blobs := ld.repo.Blobs(ctx) + + layerDownload, err := blobs.Open(ctx, ld.digest) + if err != nil { + logrus.Debugf("Error statting layer: %v", err) + if err == distribution.ErrBlobUnknown { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, retryOnError(err) + } + + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + // Restore the seek offset at the beginning of the stream. 
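// (The Seek to os.SEEK_END above doubled as a cheap way to learn the blob's
// size for the progress bar; the stream must be rewound to the start before
// any bytes are actually read.)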
+ _, err = layerDownload.Seek(0, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading") + defer reader.Close() + + verifier, err := digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + tmpFile, err := ioutil.TempFile("", "GetImageBlob") + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, verifier)) + if err != nil { + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + tmpFile.Close() + if err := os.RemoveAll(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + tmpFile.Seek(0, 0) + return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + // NOTE: not using TagService.Get, since it uses HEAD requests + // against the manifests endpoint, which are not supported by + // all registry versions. + manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. 
+ p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name()) + + var ( + imageID image.ID + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + oldTagImageID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagImageID == imageID { + return false, nil + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { + return false, err + } + } else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { + return false, err + } + + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { + return "", "", err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
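// (In a schema1 manifest, FSLayers[0] is the top-most layer, so iterating
// from len-1 down to 0 yields base-layer-first order, which is the order the
// download manager expects.)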
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err = p.config.ImageStore.Create(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + imageID = image.ID(target.Digest) + if _, err := p.config.ImageStore.Get(imageID); err == nil { + // If the image already exists locally, no need to pull + // anything. + return imageID, manifestDigest, nil + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- err + cancel() + return + } + configChan <- configJSON + }() + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.References() { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + downloadRootFS image.RootFS // rootFS to use for registering layers. 
+ ) + if runtime.GOOS == "windows" { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return "", "", err + } + if unmarshalledConfig.RootFS == nil { + return "", "", errors.New("image config has no rootfs section") + } + downloadRootFS = *unmarshalledConfig.RootFS + downloadRootFS.DiffIDs = []layer.DiffID{} + } else { + downloadRootFS = *image.NewRootFS() + } + + rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + if configJSON != nil { + // Already received the config + return "", "", err + } + select { + case err = <-errChan: + return "", "", err + default: + cancel() + select { + case <-configChan: + case <-errChan: + } + return "", "", err + } + } + defer release() + + if configJSON == nil { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return "", "", err + } + } + + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range rootFS.DiffIDs { + if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + + imageID, err = p.config.ImageStore.Create(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) { + select { + case configJSON := <-configChan: + var unmarshalledConfig image.Image + if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil { + return nil, image.Image{}, err + } + return configJSON, unmarshalledConfig, nil + case err := <-errChan: + return nil, image.Image{}, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here.
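// (For example, a daemon running on linux/amd64, per runtime.GOOS and
// runtime.GOARCH, selects the first entry whose Platform is
// {OS: "linux", Architecture: "amd64"}; if nothing matches, the pull fails
// below with "no supported platform found in manifest list".)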
+ if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(ref, manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + imageID, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return imageID, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) { + return fallbackError{err: err, confirmedV2: false} + } + } + case errcode.Error: + if registry.ShouldV2Fallback(v) { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. 
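Both schema2ManifestDigest above and the digest check that follows rely on the same verify-before-trust idiom from the vendored digest package: hash the canonical bytes and refuse the content when it does not match the requested digest. A minimal, self-contained sketch of that idiom (the payload below is made up for illustration; only the vendored digest package is assumed):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte(`{"schemaVersion": 1}`) // stand-in for canonical manifest bytes
	requested := digest.FromBytes(payload)    // e.g. what a pull-by-digest reference carries

	verifier, err := digest.NewDigestVerifier(requested)
	if err != nil {
		panic(err)
	}
	if _, err := verifier.Write(payload); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true; any tampering with payload flips this to false
}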
+ if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_unix.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 00000000..9fbb875e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_windows.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_windows.go new file mode 100644 index 00000000..de99fc9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package distribution + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { + v1img := &image.V1Image{} + if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil { + return err + } + if v1img.Parent == "" { + return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID) + } + // There must be an image that already references the baselayer. + for _, img := range is.Map() { + if img.RootFS.BaseLayerID() == v1img.Parent { + rootFS.BaseLayer = img.RootFS.BaseLayer + return nil + } + } + return fmt.Errorf("Invalid base layer %q", v1img.Parent) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/push.go new file mode 100644 index 00000000..445f6bb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/push.go @@ -0,0 +1,196 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + // MetaHeaders store HTTP headers with metadata about the image + // (DockerHeaders with prefix X-Meta- in the request). + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the push + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // LayerStore manages layers. + LayerStore layer.Store + // ImageStore manages images. 
+ ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any; a fallbackError indicates that the attempt may be retried on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on the repository named by ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow interrupting the current push when a new push of the same image is started. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("Repository does not exist: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol.
+ confirmedV2 bool + ) + + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + err = fallbackErr.err + lastErr = err + continue + } + } + + logrus.Debugf("Not continuing with error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +func compress(in io.Reader) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + }() + + return pipeReader +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v1.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 00000000..6ae2e43e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,454 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + ctx context.Context + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(p.config.MetaHeaders)..., + ) + 
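Stepping back to the compress helper that push.go defines above: it pairs io.Pipe with gzip so the HTTP uploader can pull compressed bytes through ReadFrom instead of issuing a PATCH per Write. A self-contained sketch of the same pattern (gzipPipe is our name, not the vendored code's):

package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
)

// gzipPipe mirrors the compress helper above: compression runs in a
// goroutine, and the caller consumes the compressed bytes through the
// read end of an io.Pipe.
func gzipPipe(in io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	bw := bufio.NewWriterSize(pw, 32768) // same size as compressionBufSize
	gz := gzip.NewWriter(bw)
	go func() {
		_, err := io.Copy(gz, in)
		if err == nil {
			err = gz.Close()
		}
		if err == nil {
			err = bw.Flush()
		}
		pw.CloseWithError(err) // a nil error closes the pipe normally (EOF)
	}()
	return pr
}

func main() {
	src := bytes.Repeat([]byte("layer data "), 1000)
	rc := gzipPipe(bytes.NewReader(src))
	defer rc.Close()

	gz, err := gzip.NewReader(rc) // decompress to prove the round trip works
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(gz)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out) == len(src)) // true
}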
client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := digest.Digest(imageID).Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + imgID, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[image.ID]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references.
+ continue + } + + tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) + + if _, present := imagesSeen[association.ImageID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ImageID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { + img, err := p.config.ImageStore.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + var l layer.Layer + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, l) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + } + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err + } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. 
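Before the definition of lookupImageOnEndpoint below, a stripped-down model of the fan-out it participates in (pushImageToEndpoint): a bounded set of workers drains a channel of candidates while a single collector builds the set of images that still need pushing. All names and the stand-in existence check here are illustrative:

package main

import (
	"fmt"
	"sync"
)

func main() {
	candidates := []string{"a", "b", "c", "d", "e", "f", "g"}
	onRegistry := map[string]bool{"b": true, "e": true} // stub for LookupRemoteImage

	work := make(chan string, len(candidates))
	toPush := make(chan string, len(candidates))
	collected := make(chan map[string]struct{}, 1)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ { // at most 5 checkers, as in pushImageToEndpoint
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range work {
				if !onRegistry[id] {
					toPush <- id
				}
			}
		}()
	}

	go func() { // single collector, so the map needs no locking
		set := make(map[string]struct{})
		for id := range toPush {
			set[id] = struct{}{}
		}
		collected <- set
	}()

	for _, id := range candidates {
		work <- id
	}
	close(work)   // no more candidates
	wg.Wait()     // all checkers are done...
	close(toPush) // ...so the collector can finish
	fmt.Println(len(<-collected)) // 5 of the 7 candidates still need a push
}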
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a goroutine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order in which the images + // are pushed is very important, which is why we still iterate over the ordered list of image IDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + p.config.LayerStore.Release(l) + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist.
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v2.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 00000000..68c8f69b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,417 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
+type PushResult struct { + Tag string + Digest digest.Digest + Size int +} + +type v2Pusher struct { + v2MetadataService *metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2} + } + + if err = p.pushV2Repository(ctx); err != nil { + if registry.ContinueOnError(err) { + return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2} + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Push all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error { + logrus.Debugf("Pushing repository: %s", ref.String()) + + img, err := p.config.ImageStore.Get(imageID) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + var l layer.Layer + + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer layer.ReleaseAndLog(p.config.LayerStore, l) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + repoInfo: p.repoInfo, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows.
+ for i := 0; i < len(img.RootFS.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{client.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), ref.Tag(), img.RawJSON()) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer layer.Layer + v2MetadataService *metadata.V2MetadataService + repoInfo reference.Named + repo distribution.Repository + pushState *pushState +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.repo.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error { + diffID := pd.DiffID() + + pd.pushState.Lock() + if _, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return nil + } + pd.pushState.Unlock() + + // Do we have any metadata associated with this layer's DiffID? 
+ v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) + if err != nil { + progress.Update(progressOutput, pd.ID(), "Image push failed") + return retryOnError(err) + } + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = descriptor + pd.pushState.Unlock() + return nil + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var mountFrom metadata.V2Metadata + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + for _, metadata := range v2Metadata { + sourceRepo, err := reference.ParseNamed(metadata.SourceRepository) + if err != nil { + continue + } + if pd.repoInfo.Hostname() == sourceRepo.Hostname() { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName()) + mountFrom = metadata + break + } + } + + var createOpts []distribution.BlobCreateOption + + if mountFrom.SourceRepository != "" { + namedRef, err := reference.WithName(mountFrom.SourceRepository) + if err != nil { + return err + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + return err + } + + canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) + if err != nil { + return err + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // Send the layer + layerUpload, err := bs.Create(ctx, createOpts...) 
+ switch err := err.(type) { + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return xfer.DoNotRetry{Err: err} + } + + return nil + } + if mountFrom.SourceRepository != "" { + // unable to mount layer from this repository, so this source mapping is no longer valid + logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) + pd.v2MetadataService.Remove(mountFrom) + } + + if err != nil { + return retryOnError(err) + } + defer layerUpload.Close() + + arch, err := pd.layer.TarStream() + if err != nil { + return xfer.DoNotRetry{Err: err} + } + + // don't care if this fails; best effort + size, _ := pd.layer.DiffSize() + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") + defer reader.Close() + compressedReader := compress(reader) + + digester := digest.Canonical.New() + tee := io.TeeReader(compressedReader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + compressedReader.Close() + if err != nil { + return retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return xfer.DoNotRetry{Err: err} + } + + pd.pushState.Lock() + + // If Commit succeeded, that's an indication that the remote registry + // speaks the v2 protocol. + pd.pushState.confirmedV2 = true + + pd.pushState.remoteLayers[diffID] = distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Unlock() + + return nil +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + // Not necessary to lock pushState because this is always + // called after all the mutations in pushState. + // By the time this function is called, every layer will have + // an entry in remoteLayers. + return pd.pushState.remoteLayers[pd.DiffID()] +} + +// layerAlreadyExists checks if the registry already knows about any of the +// metadata passed in the "metadata" slice. If it finds one that the registry +// knows about, it returns the known digest and "true".
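layerAlreadyExists, defined next, hinges on a three-way reading of the Stat error: nil means the blob is present, ErrBlobUnknown means try the next candidate digest, and anything else aborts the scan. A self-contained sketch of that shape, with a stubbed Stat standing in for a real registry round trip (all names here are ours):

package main

import (
	"errors"
	"fmt"
)

// errBlobUnknown stands in for distribution.ErrBlobUnknown.
var errBlobUnknown = errors.New("blob unknown to registry")

// stubStat pretends only sha256:aa exists on the remote side.
func stubStat(dgst string) (int64, error) {
	if dgst == "sha256:aa" {
		return 1024, nil
	}
	return 0, errBlobUnknown
}

func alreadyExists(candidates []string) (int64, bool, error) {
	for _, dgst := range candidates {
		size, err := stubStat(dgst)
		switch err {
		case nil:
			return size, true, nil // found: reuse instead of re-uploading
		case errBlobUnknown:
			// this candidate is absent; try the next one
		default:
			return 0, false, err // a transport/auth error ends the scan
		}
	}
	return 0, false, nil // none of the candidates are on the registry
}

func main() {
	size, ok, _ := alreadyExists([]string{"sha256:bb", "sha256:aa"})
	fmt.Println(size, ok) // 1024 true
}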
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { + for _, meta := range metadata { + // Only check blobsums that are known to this repository or have an unknown source + if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { + continue + } + descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) + switch err { + case nil: + descriptor.MediaType = schema2.MediaTypeLayer + return descriptor, true, nil + case distribution.ErrBlobUnknown: + // nop + default: + return distribution.Descriptor{}, false, err + } + } + return distribution.Descriptor{}, false, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go new file mode 100644 index 00000000..1d4a2c4e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool +} + +// Error renders the fallbackError as a string. +func (f fallbackError) Error() string { + return f.err.Error() +} + +type dumbCredentialStore struct { + auth *types.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(metaHeaders) + authTransport := transport.NewTransport(base, modifiers...)
+ pingClient := &http.Client{ + Transport: authTransport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + v2Version := auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + + versions := auth.APIVersions(resp, registry.DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundVersion = true + break + } + } + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundVersion, err + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + creds := dumbCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repo, err = client.NewRepository(ctx, repoName, endpoint.URL, tr) + return repo, foundVersion, err +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. +func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + return retryOnError(v[0]) + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied: + return xfer.DoNotRetry{Err: err} + } + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/download.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 00000000..69c8bad0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,420 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. 
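retryOnError above decides which registry errors are permanent by wrapping them in xfer.DoNotRetry; the retry loop in makeDownloadFunc further down checks for that wrapper with a type assertion. A minimal model of that interplay, under our own illustrative names:

package main

import (
	"errors"
	"fmt"
)

// permanent mirrors xfer.DoNotRetry: wrapping an error marks it as not
// worth retrying.
type permanent struct{ err error }

func (e permanent) Error() string { return e.err.Error() }

func withRetries(op func() error, maxAttempts int) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if _, isPermanent := err.(permanent); isPermanent {
			return err // same effect as the isDNR check in makeDownloadFunc
		}
	}
	return err
}

func main() {
	calls := 0
	err := withRetries(func() error {
		calls++
		return permanent{err: errors.New("401 unauthorized")}
	}, 5)
	fmt.Println(calls, err) // 1 401 unauthorized: auth failures are not retried
}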
+type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to be present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStore.Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once.
+ var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { layer.ReleaseAndLog(ldm.layerStore, topLayer) }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d seconds", delay) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. 
+			select {
+			case <-d.Transfer.Context().Done():
+				d.err = errors.New("layer registration cancelled")
+				return
+			case <-sourceDownload.Done():
+			}
+
+			l, err = sourceDownload.result()
+			if err != nil {
+				d.err = err
+				return
+			}
+
+			layerReader, err := l.TarStream()
+			if err != nil {
+				d.err = err
+				return
+			}
+			defer layerReader.Close()
+
+			d.layer, err = d.layerStore.Register(layerReader, parentLayer)
+			if err != nil {
+				d.err = fmt.Errorf("failed to register layer: %v", err)
+				return
+			}
+
+			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
+			if hasRegistered {
+				withRegistered.Registered(d.layer.DiffID())
+			}
+
+			// Doesn't actually need to be its own goroutine, but
+			// done like this so we can defer close(c).
+			go func() {
+				<-d.Transfer.Released()
+				if d.layer != nil {
+					layer.ReleaseAndLog(d.layerStore, d.layer)
+				}
+			}()
+		}()
+
+		return d
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/transfer.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/transfer.go
new file mode 100644
index 00000000..7f323c1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/transfer.go
@@ -0,0 +1,343 @@
+package xfer
+
+import (
+	"sync"
+
+	"github.com/docker/docker/pkg/progress"
+	"golang.org/x/net/context"
+)
+
+// DoNotRetry is an error wrapper indicating that the error cannot be resolved
+// with a retry.
+type DoNotRetry struct {
+	Err error
+}
+
+// Error returns the stringified representation of the encapsulated error.
+func (e DoNotRetry) Error() string {
+	return e.Err.Error()
+}
+
+// Watcher is returned by Watch and can be passed to Release to stop watching.
+type Watcher struct {
+	// signalChan is used to signal to the watcher goroutine that
+	// new progress information is available, or that the transfer
+	// has finished.
+	signalChan chan struct{}
+	// releaseChan signals to the watcher goroutine that the watcher
+	// should be detached.
+	releaseChan chan struct{}
+	// running remains open as long as the watcher is watching the
+	// transfer. It gets closed if the transfer finishes or the
+	// watcher is detached.
+	running chan struct{}
+}
+
+// Transfer represents an in-progress transfer.
+type Transfer interface {
+	Watch(progressOutput progress.Output) *Watcher
+	Release(*Watcher)
+	Context() context.Context
+	Cancel()
+	Done() <-chan struct{}
+	Released() <-chan struct{}
+	Broadcast(masterProgressChan <-chan progress.Progress)
+}
+
+type transfer struct {
+	mu sync.Mutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// watchers keeps track of the goroutines monitoring progress output,
+	// indexed by the channels that release them.
+	watchers map[chan struct{}]*Watcher
+
+	// lastProgress is the most recently received progress event.
+	lastProgress progress.Progress
+	// hasLastProgress is true when lastProgress has been set.
+	hasLastProgress bool
+
+	// running remains open as long as the transfer is in progress.
+	running chan struct{}
+	// hasWatchers stays open until all watchers release the transfer.
+	hasWatchers chan struct{}
+
+	// broadcastDone is true if the master progress channel has closed.
+	broadcastDone bool
+	// broadcastSyncChan allows watchers to "ping" the broadcasting
+	// goroutine to wait for it to deplete its input channel. This ensures
+	// a detaching watcher won't miss an event that was sent before it
+	// started detaching.
+	broadcastSyncChan chan struct{}
+}
+
+// NewTransfer creates a new transfer.
+func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + hasWatchers: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + if t.broadcastDone { + close(w.running) + return w + } + + t.watchers[w.releaseChan] = w + + go func() { + defer func() { + close(w.running) + }() + done := false + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // This might write the last progress item a + // second time (since channel closure also gets + // us here), but that's fine. + if hasLastProgress { + progressOutput.WriteProgress(lastProgress) + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + close(t.hasWatchers) + t.cancel() + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. 
+ return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer. +func (t *transfer) Released() <-chan struct{} { + return t.hasWatchers +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Cancel cancels the context associated with the transfer. +func (t *transfer) Cancel() { + t.cancel() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + if xfer, present := tm.transfers[key]; present { + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + return xfer, watcher + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. 
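+	// A receive from start succeeds only once the channel has been
+	// closed, i.e. the transfer was actually granted an active slot.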
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/upload.go b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/upload.go new file mode 100644 index 00000000..8da6a89e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/upload.go @@ -0,0 +1,154 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { + return &LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + } +} + +type uploadTransfer struct { + Transfer + + diffID layer.DiffID + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) error +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]struct{}) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + dedupDescriptors[key] = struct{}{} + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + diffID: descriptor.DiffID(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
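+				// As with downloads, check for cancellation without
+				// blocking before scheduling a retry.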
+				select {
+				case <-u.Transfer.Context().Done():
+					u.err = err
+					return
+				default:
+				}
+
+				retries++
+				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
+					logrus.Errorf("Upload failed: %v", err)
+					u.err = err
+					return
+				}
+
+				logrus.Errorf("Upload failed, retrying: %v", err)
+				delay := retries * 5
+				ticker := time.NewTicker(time.Second)
+
+			selectLoop:
+				for {
+					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d seconds", delay)
+					select {
+					case <-ticker.C:
+						delay--
+						if delay == 0 {
+							ticker.Stop()
+							break selectLoop
+						}
+					case <-u.Transfer.Context().Done():
+						ticker.Stop()
+						u.err = errors.New("upload cancelled during retry delay")
+						return
+					}
+				}
+			}
+		}()
+
+		return u
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/dockerversion/version_lib.go b/Godeps/_workspace/src/github.com/docker/docker/dockerversion/version_lib.go
new file mode 100644
index 00000000..1d5862d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/dockerversion/version_lib.go
@@ -0,0 +1,16 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variables for library-import.
+// This file is overridden on build with build-time information.
+const (
+	GitCommit string = "library-import"
+	Version   string = "library-import"
+	BuildTime string = "library-import"
+
+	IAmStatic string = "library-import"
+	InitSHA1  string = "library-import"
+	InitPath  string = "library-import"
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/fs.go b/Godeps/_workspace/src/github.com/docker/docker/image/fs.go
new file mode 100644
index 00000000..72c9ab42
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/fs.go
@@ -0,0 +1,184 @@
+package image
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+)
+
+// IDWalkFunc is a function called by StoreBackend.Walk
+type IDWalkFunc func(id ID) error
+
+// StoreBackend provides an interface for image.Store persistence
+type StoreBackend interface {
+	Walk(f IDWalkFunc) error
+	Get(id ID) ([]byte, error)
+	Set(data []byte) (ID, error)
+	Delete(id ID) error
+	SetMetadata(id ID, key string, data []byte) error
+	GetMetadata(id ID, key string) ([]byte, error)
+	DeleteMetadata(id ID, key string) error
+}
+
+// fs implements StoreBackend using the filesystem.
+type fs struct {
+	sync.RWMutex
+	root string
+}
+
+const (
+	contentDirName  = "content"
+	metadataDirName = "metadata"
+)
+
+// NewFSStoreBackend returns a new filesystem-based backend for image.Store
+func NewFSStoreBackend(root string) (StoreBackend, error) {
+	return newFSStore(root)
+}
+
+func newFSStore(root string) (*fs, error) {
+	s := &fs{
+		root: root,
+	}
+	if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+func (s *fs) contentFile(id ID) string {
+	dgst := digest.Digest(id)
+	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+func (s *fs) metadataDir(id ID) string {
+	dgst := digest.Digest(id)
+	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+// Walk calls the supplied callback for each image ID in the storage backend.
+func (s *fs) Walk(f IDWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(ID(dgst)); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given ID. +func (s *fs) Get(id ID) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(id) +} + +func (s *fs) get(id ID) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(id)) + if err != nil { + return nil, err + } + + // todo: maybe optional + if ID(digest.FromBytes(content)) != id { + return nil, fmt.Errorf("failed to verify image: %v", id) + } + + return content, nil +} + +// Set stores content under a given ID. +func (s *fs) Set(data []byte) (ID, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + id := ID(digest.FromBytes(data)) + filePath := s.contentFile(id) + tempFilePath := s.contentFile(id) + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return "", err + } + if err := os.Rename(tempFilePath, filePath); err != nil { + return "", err + } + + return id, nil +} + +// Delete removes content and metadata files associated with the ID. +func (s *fs) Delete(id ID) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(id)); err != nil { + return err + } + if err := os.Remove(s.contentFile(id)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(id ID, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(id); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(id)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + filePath := filepath.Join(s.metadataDir(id), key) + tempFilePath := filePath + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return err + } + return os.Rename(tempFilePath, filePath) +} + +// GetMetadata returns metadata for a given ID. +func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(id); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) +} + +// DeleteMetadata removes the metadata associated with an ID. +func (s *fs) DeleteMetadata(id ID, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/image.go b/Godeps/_workspace/src/github.com/docker/docker/image/image.go new file mode 100644 index 00000000..b33b0b84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/image.go @@ -0,0 +1,128 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/engine-api/types/container" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return digest.Digest(id).String() +} + +// V1Image stores the V1 image configuration. 
+type V1Image struct {
+	// ID is a unique 64 character identifier of the image
+	ID string `json:"id,omitempty"`
+	// Parent is the ID of the parent image
+	Parent string `json:"parent,omitempty"`
+	// Comment is a user-added comment
+	Comment string `json:"comment,omitempty"`
+	// Created is the timestamp when the image was created
+	Created time.Time `json:"created"`
+	// Container is the ID of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig container.Config `json:"container_config,omitempty"`
+	// DockerVersion specifies the version of Docker that was used to build the image
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author of the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *container.Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built to run on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64 `json:",omitempty"`
+}
+
+// Image stores the image configuration
+type Image struct {
+	V1Image
+	Parent  ID        `json:"parent,omitempty"`
+	RootFS  *RootFS   `json:"rootfs,omitempty"`
+	History []History `json:"history,omitempty"`
+
+	// rawJSON caches the immutable JSON associated with this image.
+	rawJSON []byte
+
+	// computedID is the ID computed from the hash of the image config.
+	// Not to be confused with the legacy V1 ID in V1Image.
+	computedID ID
+}
+
+// RawJSON returns the immutable JSON associated with the image.
+func (img *Image) RawJSON() []byte {
+	return img.rawJSON
+}
+
+// ID returns the image's content-addressable ID.
+func (img *Image) ID() ID {
+	return img.computedID
+}
+
+// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
+// that JSON that's been manipulated by a push/pull cycle with a legacy
+// registry won't end up with a different key order.
+func (img *Image) MarshalJSON() ([]byte, error) {
+	type MarshalImage Image
+
+	pass1, err := json.Marshal(MarshalImage(*img))
+	if err != nil {
+		return nil, err
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(pass1, &c); err != nil {
+		return nil, err
+	}
+	return json.Marshal(c)
+}
+
+// History stores build commands that were used to create an image
+type History struct {
+	// Created is the timestamp for the build point
+	Created time.Time `json:"created"`
+	// Author of the build point
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building the image.
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is a custom message set by the user when creating the image.
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Exporter provides an interface for exporting and importing images
+type Exporter interface {
+	Load(io.ReadCloser, io.Writer) error
+	// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
+	Save([]string, io.Writer) error
+}
+
+// NewFromJSON creates an Image configuration from JSON.
+func NewFromJSON(src []byte) (*Image, error) {
+	img := &Image{}
+
+	if err := json.Unmarshal(src, img); err != nil {
+		return nil, err
+	}
+	if img.RootFS == nil {
+		return nil, errors.New("invalid image JSON, no RootFS key")
+	}
+
+	img.rawJSON = src
+
+	return img, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs.go b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs.go
new file mode 100644
index 00000000..b546696d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs.go
@@ -0,0 +1,8 @@
+package image
+
+import "github.com/docker/docker/layer"
+
+// Append appends a new diffID to the rootfs
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_unix.go b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_unix.go
new file mode 100644
index 00000000..285d466b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_unix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package image
+
+import "github.com/docker/docker/layer"
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	return layer.CreateChainID(r.DiffIDs)
+}
+
+// NewRootFS returns an empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: "layers"}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_windows.go b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_windows.go
new file mode 100644
index 00000000..10b85497
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package image
+
+import (
+	"crypto/sha512"
+	"fmt"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type      string         `json:"type"`
+	DiffIDs   []layer.DiffID `json:"diff_ids,omitempty"`
+	BaseLayer string         `json:"base_layer,omitempty"`
+}
+
+// BaseLayerID returns the 64 byte hex ID for the base layer name.
+func (r *RootFS) BaseLayerID() string {
+	baseID := sha512.Sum384([]byte(r.BaseLayer))
+	return fmt.Sprintf("%x", baseID[:32])
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
+	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
+}
+
+// NewRootFS returns an empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: "layers+base"}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md b/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md
new file mode 100644
index 00000000..f2c29155
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md
@@ -0,0 +1,573 @@
+# Docker Image Specification v1.0.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and the
+corresponding parameters, and describes how to create and use them with a
+container runtime and execution tool.
+
+## Terminology
+
+This specification uses the following terms:
+
+- **Layer**
+
+  Images are composed of *layers*. *Image layer* is a general term which may
+  be used to refer to one or both of the following:
+
+  1. The metadata for the layer, described in the JSON format.
+  2. The filesystem changes described by a layer.
+
+  To refer to the former you may use the term *Layer JSON* or
+  *Layer Metadata*. To refer to the latter you may use the term
+  *Image Filesystem Changeset* or *Image Diff*.
+
+- **Image JSON**
+
+  Each layer has an associated JSON structure which describes some basic
+  information about the image such as date created, author, and the ID of its
+  parent image, as well as execution/runtime configuration like its entry
+  point, default arguments, CPU/memory shares, networking, and volumes.
+
+- **Image Filesystem Changeset**
+
+  Each layer has an archive of the files which have been added, changed, or
+  deleted relative to its parent layer. Using a layer-based or union
+  filesystem such as AUFS, or by computing the diff from filesystem
+  snapshots, the filesystem changeset can be used to present a series of
+  image layers as if they were one cohesive filesystem.
+
+- **Image ID**
+
+  Each layer is given an ID upon its creation. It is represented as a
+  hexadecimal encoding of 256 bits, e.g.,
+  `a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`. Image
+  IDs should be sufficiently random so as to be globally unique. 32 bytes
+  read from `/dev/urandom` is sufficient for all practical purposes.
+  Alternatively, an image ID may be derived as a cryptographic hash of image
+  contents, as the result is considered indistinguishable from random. The
+  choice is left up to implementors.
+
+- **Image Parent**
+
+  Most layer metadata structs contain a `parent` field which refers to the
+  image from which another directly descends. An image contains a separate
+  JSON metadata file and set of changes relative to the filesystem of its
+  parent image. *Image Ancestor* and *Image Descendant* are also common
+  terms.
+
+- **Image Checksum**
+
+  Layer metadata structs contain a cryptographic hash of the contents of the
+  layer's filesystem changeset. Though the set of changes exists as a simple
+  Tar archive, two archives with identical filenames and content will have
+  different SHA digests if the last-access or last-modified times of any
+  entries differ. For this reason, image checksums are generated using the
+  TarSum algorithm, which produces a cryptographic hash of file contents and
+  selected headers only. Details of this algorithm are described in the
+  separate TarSum specification.
+
+- **Tag**
+
+  A tag serves to map a descriptive, user-given name to any single image ID.
+  An image name suffix (the name component after `:`) is often referred to
+  as a tag as well, though it strictly refers to the full name of an image.
+  Acceptable values for a tag suffix are implementation specific, but they
+  SHOULD be limited to the set of alphanumeric characters `[a-zA-Z0-9]` and
+  punctuation characters `[._-]`, and MUST NOT contain a `:` character.
+
+- **Repository**
+
+  A collection of tags grouped under a common prefix (the name component
+  before `:`). For example, in an image tagged with the name `my-app:3.1.4`,
+  `my-app` is the *Repository* component of the name. Acceptable values for
+  a repository name are implementation specific, but they SHOULD be limited
+  to the set of alphanumeric characters `[a-zA-Z0-9]` and punctuation
+  characters `[._-]`; it MAY additionally contain `/` and `:` characters for
+  organizational purposes, with the last `:` character being interpreted as
+  dividing the repository component of the name from the tag suffix
+  component.
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
+    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
+    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
+    "created": "2014-10-13T21:19:18.674353812Z",
+    "author": "Alyssa P. Hacker <alyspdev@example.com>",
+    "architecture": "amd64",
+    "os": "linux",
+    "Size": 271828,
+    "config": {
+        "User": "alice",
+        "Memory": 2048,
+        "MemorySwap": 4096,
+        "CpuShares": 8,
+        "ExposedPorts": {
+            "8080/tcp": {}
+        },
+        "Env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            "FOO=docker_is_a_really",
+            "BAR=great_tool_you_know"
+        ],
+        "Entrypoint": [
+            "/bin/my-app-binary"
+        ],
+        "Cmd": [
+            "--foreground",
+            "--config",
+            "/etc/my-app.d/default.cfg"
+        ],
+        "Volumes": {
+            "/var/job-result-data": {},
+            "/var/log/my-app-logs": {}
+        },
+        "WorkingDir": "/home/alice"
+    }
+}
+```
+
+### Image JSON Field Descriptions
+
+- **id** *string*
+
+  Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies the
+  image.
+
+- **parent** *string*
+
+  ID of the parent image. If there is no parent image then this field should
+  be omitted. A collection of images may share many of the same ancestor
+  layers. This organizational structure is strictly a tree with any one
+  layer having either no parent or a single parent and zero or more
+  descendant layers. Cycles are not allowed and implementations should be
+  careful to avoid creating them or iterating through a cycle indefinitely.
+
+- **created** *string*
+
+  ISO-8601 formatted combined date and time at which the image was created.
+
+- **author** *string*
+
+  Gives the name and/or email address of the person or entity which created
+  and is responsible for maintaining the image.
+
+- **architecture** *string*
+
+  The CPU architecture which the binaries in this image are built to run on.
+  Possible values include:
+
+  - `386`
+  - `amd64`
+  - `arm`
+
+  More values may be supported in the future and any of these may or may not
+  be supported by a given container runtime implementation.
+
+- **os** *string*
+
+  The name of the operating system which the image is built to run on.
+  Possible values include:
+
+  - `darwin`
+  - `freebsd`
+  - `linux`
+
+  More values may be supported in the future and any of these may or may not
+  be supported by a given container runtime implementation.
+
+- **checksum** *string*
+
+  Image Checksum of the filesystem changeset associated with the image
+  layer.
+
+- **Size** *integer*
+
+  The size in bytes of the filesystem changeset associated with the image
+  layer.
+
+- **config** *struct*
+
+  The execution parameters which should be used as a base when running a
+  container using the image. This field can be `null`, in which case any
+  execution parameters should be specified at creation of the container.
+
+  **Container RunConfig Field Descriptions**
+
+  - **User** *string*
+
+    The username or UID which the process in the container should run as.
+    This acts as a default value to use when the value is not specified when
+    creating a container. All of the following are valid:
+
+    - `user`
+    - `uid`
+    - `user:group`
+    - `uid:gid`
+    - `uid:group`
+    - `user:gid`
+
+    If `group`/`gid` is not specified, the default group and supplementary
+    groups of the given `user`/`uid` in `/etc/passwd` from the container are
+    applied.
+
+  - **Memory** *integer*
+
+    Memory limit (in bytes). This acts as a default value to use when the
+    value is not specified when creating a container.
+
+  - **MemorySwap** *integer*
+
+    Total memory usage (memory + swap); set to `-1` to disable swap. This
+    acts as a default value to use when the value is not specified when
+    creating a container.
+
+  - **CpuShares** *integer*
+
+    CPU shares (relative weight vs. other containers). This acts as a
+    default value to use when the value is not specified when creating a
+    container.
+
+  - **ExposedPorts** *struct*
+
+    A set of ports to expose from a container running this image. This JSON
+    structure value is unusual because it is a direct JSON serialization of
+    the Go type `map[string]struct{}` and is represented in JSON as an
+    object mapping its keys to an empty object (a runnable illustration
+    follows these descriptions). Here is an example:
+
+    ```
+    {
+        "8080": {},
+        "53/udp": {},
+        "2356/tcp": {}
+    }
+    ```
+
+    Its keys can be in the format of:
+
+    - `"port/tcp"`
+    - `"port/udp"`
+    - `"port"`
+
+    with the default protocol being `"tcp"` if not specified. These values
+    act as defaults and are merged with any specified when creating a
+    container.
+
+  - **Env** *array of strings*
+
+    Entries are in the format of `VARNAME="var value"`. These values act as
+    defaults and are merged with any specified when creating a container.
+
+  - **Entrypoint** *array of strings*
+
+    A list of arguments to use as the command to execute when the container
+    starts. This value acts as a default and is replaced by an entrypoint
+    specified when creating a container.
+
+  - **Cmd** *array of strings*
+
+    Default arguments to the entry point of the container. These values act
+    as defaults and are replaced with any specified when creating a
+    container. If an `Entrypoint` value is not specified, then the first
+    entry of the `Cmd` array should be interpreted as the executable to run.
+
+  - **Volumes** *struct*
+
+    A set of directories which should be created as data volumes in a
+    container running this image. This JSON structure value is unusual
+    because it is a direct JSON serialization of the Go type
+    `map[string]struct{}` and is represented in JSON as an object mapping
+    its keys to an empty object. Here is an example:
+
+    ```
+    {
+        "/var/my-app-data/": {},
+        "/etc/some-config.d/": {}
+    }
+    ```
+
+  - **WorkingDir** *string*
+
+    Sets the current working directory of the entry point process in the
+    container. This value acts as a default and is replaced by a working
+    directory specified when creating a container.
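+
+The `map[string]struct{}` serialization mentioned for `ExposedPorts` and
+`Volumes` above can be observed directly from Go. The following is a minimal,
+illustrative sketch (not part of the specification):
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// ExposedPorts and Volumes serialize Go's map[string]struct{}:
+	// every key maps to an empty JSON object.
+	exposedPorts := map[string]struct{}{
+		"8080/tcp": {},
+		"53/udp":   {},
+	}
+	out, err := json.Marshal(exposedPorts)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out)) // {"53/udp":{},"8080/tcp":{}}
+}
+```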
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory named with the
+ID of the image being created. Here is the initial empty directory structure
+for the changeset for an image with ID `c3167915dc9d` (real IDs are much
+longer, but this example uses a truncated one here for brevity.
+Implementations need not name the rootfs directory in this way but it may be
+convenient for keeping record of a large number of image layers.):
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+The TarSum checksum for the archive file is then computed and placed in the
+JSON metadata along with the execution parameters.
+
+To make changes to the filesystem of this container image, create a new
+directory named with a new ID, such as `f60c56784b83`, and initialize it with
+a snapshot of the parent image's root filesystem, so that the directory is
+identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
+can make this very efficient:
+
+```
+f60c56784b83/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added:    /etc/my-app.d/default.cfg
+Modified: /bin/my-app-tools
+Deleted:  /etc/my-app-config
+```
+
+A Tar archive is then created which contains *only* this changeset: the added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
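+
+As a concrete illustration (not part of the specification), the following
+sketch shows how a consumer applying a changeset might interpret whiteout
+entries; the helper name `whiteoutTarget` is ours:
+
+```
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// whiteoutTarget reports whether a tar entry name is a whiteout marker and,
+// if so, which path it deletes from the parent filesystem.
+func whiteoutTarget(name string) (string, bool) {
+	dir, base := filepath.Split(name)
+	if !strings.HasPrefix(base, ".wh.") {
+		return "", false
+	}
+	return filepath.Join(dir, strings.TrimPrefix(base, ".wh.")), true
+}
+
+func main() {
+	for _, entry := range []string{
+		"etc/my-app.d/default.cfg", // added or modified: extract as-is
+		"etc/.wh.my-app-config",    // whiteout: delete etc/my-app-config
+	} {
+		if target, ok := whiteoutTarget(entry); ok {
+			fmt.Println("delete", target)
+		} else {
+			fmt.Println("extract", entry)
+		}
+	}
+}
+```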
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - all image layer JSON files
+ - the tar archive of each layer's filesystem changeset
+
+For example, here's what the full archive of `library/busybox` looks like
+(displayed in `tree` format):
+
+```
+.
+├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+└── repositories
+```
+
+There are one or more directories named with the ID for each layer in a full
+image. Each of these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The JSON metadata for an image layer
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+   layer.
+
+The content of the `VERSION` files is simply the semantic version of the JSON
+metadata schema:
+
+```
+1.0
+```
+
+And the `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+    "busybox":{
+        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
+    }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a collection
+of tag suffixes. Each tag maps to the ID of the image represented by that tag.
+(A short decoding sketch appears at the end of this document.)
+
+## Loading an Image Filesystem Changeset
+
+Unpacking a bundle of image layer JSON files and their corresponding filesystem
+changesets can be done using a series of steps:
+
+1. Follow the parent IDs of image layers to find the root ancestor (an image
+with no parent ID specified).
+2. For every image layer, in order from root ancestor and descending down,
+extract the contents of that layer's filesystem changeset archive into a
+directory which will be used as the root of a container filesystem.
+
+   - Extract all contents of each archive.
+   - Walk the directory tree once more, removing any files with the prefix
+   `.wh.` and the corresponding file or directory named without this prefix.
+
+## Implementations
+
+This specification is an admittedly imperfect description of an
+imperfectly-understood problem. The Docker project is, in turn, an attempt to
+implement this specification. Our goal and our execution toward it will evolve
+over time, but our primary concern in this specification and in our
+implementation is compatibility and interoperability.
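+
+To close, here is a minimal sketch (not part of the specification) of decoding
+a `repositories` file as described above; it mirrors the
+`map[string]map[string]string` decoding used by `legacyLoad` in
+`image/tarexport/load.go`:
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// The repositories file maps repository name -> tag -> image ID.
+	data := []byte(`{
+	    "busybox": {
+	        "latest": "5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
+	    }
+	}`)
+
+	repositories := make(map[string]map[string]string)
+	if err := json.Unmarshal(data, &repositories); err != nil {
+		panic(err)
+	}
+	for name, tags := range repositories {
+		for tag, id := range tags {
+			fmt.Printf("%s:%s -> %s\n", name, tag, id)
+		}
+	}
+}
+```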
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/store.go b/Godeps/_workspace/src/github.com/docker/docker/image/store.go
new file mode 100644
index 00000000..302be2f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/image/store.go
@@ -0,0 +1,289 @@
+package image
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// Store is an interface for creating and accessing images
+type Store interface {
+	Create(config []byte) (ID, error)
+	Get(id ID) (*Image, error)
+	Delete(id ID) ([]layer.Metadata, error)
+	Search(partialID string) (ID, error)
+	SetParent(id ID, parent ID) error
+	GetParent(id ID) (ID, error)
+	Children(id ID) []ID
+	Map() map[ID]*Image
+	Heads() map[ID]*Image
+}
+
+// LayerGetReleaser is a minimal interface for getting and releasing layers.
+type LayerGetReleaser interface {
+	Get(layer.ChainID) (layer.Layer, error)
+	Release(layer.Layer) ([]layer.Metadata, error)
+}
+
+type imageMeta struct {
+	layer    layer.Layer
+	children map[ID]struct{}
+}
+
+type store struct {
+	sync.Mutex
+	ls        LayerGetReleaser
+	images    map[ID]*imageMeta
+	fs        StoreBackend
+	digestSet *digest.Set
+}
+
+// NewImageStore returns a new store object for the given layer store
+func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
+	is := &store{
+		ls:        ls,
+		images:    make(map[ID]*imageMeta),
+		fs:        fs,
+		digestSet: digest.NewSet(),
+	}
+
+	// load all current images and retain layers
+	if err := is.restore(); err != nil {
+		return nil, err
+	}
+
+	return is, nil
+}
+
+func (is *store) restore() error {
+	err := is.fs.Walk(func(id ID) error {
+		img, err := is.Get(id)
+		if err != nil {
+			logrus.Errorf("invalid image %v, %v", id, err)
+			return nil
+		}
+		var l layer.Layer
+		if chainID := img.RootFS.ChainID(); chainID != "" {
+			l, err = is.ls.Get(chainID)
+			if err != nil {
+				return err
+			}
+		}
+		if err := is.digestSet.Add(digest.Digest(id)); err != nil {
+			return err
+		}
+
+		imageMeta := &imageMeta{
+			layer:    l,
+			children: make(map[ID]struct{}),
+		}
+
+		is.images[ID(id)] = imageMeta
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Second pass to fill in children maps
+	for id := range is.images {
+		if parent, err := is.GetParent(id); err == nil {
+			if parentMeta := is.images[parent]; parentMeta != nil {
+				parentMeta.children[id] = struct{}{}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (is *store) Create(config []byte) (ID, error) {
+	var img Image
+	err := json.Unmarshal(config, &img)
+	if err != nil {
+		return "", err
+	}
+
+	// Must reject any config that references diffIDs from the history
+	// which aren't among the rootfs layers.
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := ID(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + return "", err + } + return ID(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id, "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(digest.Digest(id)); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id, "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id, "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? 
+} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go new file mode 100644 index 00000000..9724c5ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go @@ -0,0 +1,289 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error { + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream) + } + return manifestFile.Close() + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil 
{ + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID, outStream) + } + + } + + return nil +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) { + rawTar, err := os.Open(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + inflatedLayerData, err := archive.DecompressStream(rawTar) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return repositoriesFile.Close() + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID, outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return 
err + } + newLayer, err := l.loadLayer(layerPath, *rootFS) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 00000000..b7022ac5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,300 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(assoc.ImageID, assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + var imgID image.ID + if imgID, err = l.rs.Get(ref); err != nil { + return nil, err + } + addAssoc(imgID, ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + 
return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + + for id, imageDescr := range s.images { + if err = s.saveImage(id); err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: digest.Digest(id).Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + }) + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := os.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(manifest); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := os.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) error { + img, err := s.is.Get(id) + if err != nil { + return err + } + + if len(img.RootFS.DiffIDs) == 0 { + return fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{} + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { + return err + } + layers = append(layers, v1Img.ID) + parent = v1ID + } + + configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return err + } + if err := os.Chtimes(configFile, img.Created, img.Created); err != nil { + return err + } + + s.images[id].layers = layers + return nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return err + } + + // todo: why is this version file here? 
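+	// A plausible reason (assumption, not verified in this tree): the legacy + // image layout stored each layer as a directory holding VERSION ("1.0"), a + // "json" config and "layer.tar", and older tools expect the VERSION file, + // so the exporter keeps writing it.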
+ if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return err + } + + // serialize filesystem + tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) + if err != nil { + return err + } + defer tarFile.Close() + + l, err := s.ls.Get(id) + if err != nil { + return err + } + defer layer.ReleaseAndLog(s.ls, l) + + arch, err := l.TarStream() + if err != nil { + return err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? + if err := os.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return err + } + } + + s.savedLayers[legacyImg.ID] = struct{}{} + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go new file mode 100644 index 00000000..cc8cdc85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go @@ -0,0 +1,36 @@ +package tarexport + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/v1/imagev1.go b/Godeps/_workspace/src/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 00000000..cdea0e72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,148 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/version" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = version.Version("1.8.3") + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd.Slice(), " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. 
+// Used for backwards compatibility with old clients. +func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. 
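+// A valid ID is exactly 64 lowercase hexadecimal characters, e.g. the hex +// form of a sha256 digest; anything shorter, uppercase, or non-hex is rejected.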
+func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/empty.go b/Godeps/_workspace/src/github.com/docker/docker/layer/empty.go new file mode 100644 index 00000000..5e1cb184 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/empty.go @@ -0,0 +1,48 @@ +package layer + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of an empty tar file +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to an empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go b/Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go new file mode 100644 index 00000000..a0044b36 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,326 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + root string +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
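+// Layer metadata is laid out under <root>/<algorithm>/<hex> and mount +// metadata under <root>/mounts/<name>, as implemented by getLayerDirectory +// and getMountDirectory below.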
+func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + + td, err := ioutil.TempDir(tmpDir, "layer-") + if err != nil { + return nil, err + } + // Create a new tempdir + return &fileMetadataTransaction{ + store: fms, + root: td, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + return os.Rename(fm.root, finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return os.RemoveAll(fm.root) +} + +func (fm *fileMetadataTransaction) String() string { + return fm.root +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := 
ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids 
= append(ids, ChainID(dgst)) + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer.go b/Godeps/_workspace/src/github.com/docker/docker/layer/layer.go new file mode 100644 index 00000000..ef2ac7ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/layer.go @@ -0,0 +1,256 @@ +// Package layer is a package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when an attempt is made + // to create a mount with a name + // that is already in use. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when creating a layer + // would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. 
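+	// Per the package contract above, the returned stream should + // match the tar data originally used to create the layer.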
+ TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// readable and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // to the writable layer. + Mount(mountLabel string) (string, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. 
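+	// In the file-backed store, a transaction is staged in a + // temporary directory and atomically renamed into place when + // Commit is called.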
+ StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) +} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, use this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 00000000..619c1a30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,626 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. 
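+// In other words, 127 minus the two extra layers a read-write mount +// needs (its "-init" layer and the mount itself) leaves 125 for the +// read-only chain.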
+const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New( + options.StorePath, + options.GraphDriver, + options.GraphDriverOptions, + options.UIDMaps, + options.GIDMaps) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + 
} + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.New() + tr := io.TeeReader(ts, digester.Hash()) + + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr)) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of created sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + layer := ls.get(l) + if layer == nil { + return
nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) + if err != nil { + return nil, err + } + m.initID = pid + } + + if err = ls.driver.Create(m.mountID, pid, ""); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + return nil, 
err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. + initID := fmt.Sprintf("%s-init", graphID) + + if err := ls.driver.Create(initID, parent, mountLabel); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + type diffPathDriver interface { + DiffPath(string) (string, func() error, error) + } + + diffDriver, ok := ls.driver.(diffPathDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fsPath, releasePath, err := diffDriver.DiffPath(graphID) + if err != nil { + return err + } + defer releasePath() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + fileGetter := storage.NewPathFileGetter(fsPath) + logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath) + return asm.WriteOutputTarStream(fileGetter, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return "", nil, err + } + return p, func() error { + return n.Driver.Put(id) + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_unix.go b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 00000000..524b97e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_windows.go b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 00000000..e20311a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,98 @@ +package layer + +import 
( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" +) + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { + var err error // this is used for cleanup in existingLayer case + diffID := digest.FromBytes([]byte(graphID)) + + // Create new roLayer + layer := &roLayer{ + cacheID: graphID, + diffID: DiffID(diffID), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + size: size, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + layer.chainID = createChainIDFromParent("", layer.diffID) + + if !ls.driver.Exists(layer.cacheID) { + return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) + } + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} + +func (ls *layerStore) GraphDriver() graphdriver.Driver { + return ls.driver +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/migration.go b/Godeps/_workspace/src/github.com/docker/docker/layer/migration.go new file mode 100644 index 00000000..9779ab79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/migration.go @@ -0,0 +1,255 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
+func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + // guard against a stored mount without a parent before dereferencing it + if (m.parent == nil && parent != "") || (m.parent != nil && m.parent.chainID != parent) { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return errors.New("graph ID does not exist") + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.New() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of created sources but may not be an error returned + // to the caller (already exists). 
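+	// The deferred cleanups below key off this err variable, so + // assignments throughout the function reuse it rather than + // shadowing it with := in nested scopes.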
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/mounted_layer.go b/Godeps/_workspace/src/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 00000000..b3d65688 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,144 @@ +package layer + +import ( + "io" + "sync" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) + if err != nil { + return nil, err + } + return archiver, nil +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
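+	// (A non-nil interface value wrapping a nil *roLayer would not + // compare equal to nil at call sites, so return an untyped nil.)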
+ return nil +} + +func (ml *mountedLayer) Mount(mountLabel string) (string, error) { + return ml.layerStore.driver.Get(ml.mountID, mountLabel) +} + +func (ml *mountedLayer) Unmount() error { + return ml.layerStore.driver.Put(ml.mountID) +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + rl, ok := ml.references[ref] + if !ok { + return ErrLayerNotRetained + } + + if err := rl.release(); err != nil { + return err + } + delete(ml.references, ref) + + return nil +} + +type referencedRWLayer struct { + *mountedLayer + + activityL sync.Mutex + activityCount int +} + +func (rl *referencedRWLayer) release() error { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount > 0 { + return ErrActiveMount + } + + rl.activityCount = -1 + + return nil +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount == -1 { + return "", ErrLayerNotRetained + } + + rl.activityCount++ + return rl.mountedLayer.Mount(mountLabel) +} + +func (rl *referencedRWLayer) Unmount() error { + rl.activityL.Lock() + defer rl.activityL.Unlock() + + if rl.activityCount == 0 { + return ErrNotMounted + } + if rl.activityCount == -1 { + return ErrLayerNotRetained + } + rl.activityCount-- + + return rl.mountedLayer.Unmount() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/ro_layer.go b/Godeps/_workspace/src/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 00000000..51e0921d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,119 @@ +package layer + +import "io" + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + + referenceCount int + references map[Layer]struct{} +} + +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + return pr, nil +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := 
&referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go new file mode 100644 index 00000000..d1b69854 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,146 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "runtime" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter + // is not supplied. A better longer term solution would be to use a named + // pipe as the default on the Windows daemon. + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) + if err != nil { + return val, err + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultHost, val string) (string, error) { + host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) + if err != nil { + return val, err + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. 
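+// For example, an empty addr resolves to the platform default, a bare +// "host:port" is treated as tcp://host:port, and fd:// addresses are +// returned unchanged.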
+// Depending on the address specified, this will use the defaultTCPAddr or defaultUnixAddr
+// defaultUnixAddr must be an absolute file path (no `unix://` prefix)
+// defaultTCPAddr must be the full `tcp://host:port` form
+func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
+	addr = strings.TrimSpace(addr)
+	if addr == "" {
+		if defaultAddr == defaultTLSHost {
+			return defaultTLSHost, nil
+		}
+		if runtime.GOOS != "windows" {
+			return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
+		}
+		return defaultTCPAddr, nil
+	}
+	addrParts := strings.Split(addr, "://")
+	if len(addrParts) == 1 {
+		addrParts = []string{"tcp", addrParts[0]}
+	}
+
+	switch addrParts[0] {
+	case "tcp":
+		return parseTCPAddr(addrParts[1], defaultTCPAddr)
+	case "unix":
+		return parseUnixAddr(addrParts[1], defaultUnixAddr)
+	case "fd":
+		return addr, nil
+	default:
+		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+	}
+}
+
+// parseUnixAddr parses and validates that the specified address is a valid UNIX
+// socket address. It returns a formatted UNIX socket address, either using the
+// address parsed from addr, or the contents of defaultAddr if addr is a blank
+// string.
+func parseUnixAddr(addr string, defaultAddr string) (string, error) {
+	addr = strings.TrimPrefix(addr, "unix://")
+	if strings.Contains(addr, "://") {
+		return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
+	}
+	if addr == "" {
+		addr = defaultAddr
+	}
+	return fmt.Sprintf("unix://%s", addr), nil
+}
+
+// parseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+	if tryAddr == "" || tryAddr == "tcp://" {
+		return defaultAddr, nil
+	}
+	addr := strings.TrimPrefix(tryAddr, "tcp://")
+	if strings.Contains(addr, "://") || addr == "" {
+		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+	}
+
+	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+	if err != nil {
+		return "", err
+	}
+	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+	// not 1.4. See https://github.com/golang/go/issues/12200 and
+	// https://github.com/golang/go/issues/6530.
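+	// For example (illustrative input): an addr of "[::1]:" would otherwise
+	// fail in url.Parse below; appending defaultPort first (e.g. "[::1]:2375"
+	// with the defaults above) keeps the parse working.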
+ if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 00000000..611407a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 00000000..ec52e9a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = DefaultTCPHost diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go new file mode 100644 index 00000000..c7b0dc99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go @@ -0,0 +1,42 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go new file mode 100644 index 00000000..05aadbe7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go @@ -0,0 +1,242 @@ +package opts + +import ( + "fmt" + "net" + "regexp" + "strings" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. 
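+// A minimal usage sketch (values are illustrative):
+//
+//	hosts := NewListOpts(ValidateHost)
+//	_ = hosts.Set("tcp://127.0.0.1:2375") // validated, then appended
+//	all := hosts.GetAll()                 // ["tcp://127.0.0.1:2375"]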
+type ListOpts struct {
+	values    *[]string
+	validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	var values []string
+	return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value with the validator, if one is set, and
+// appends it to the internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	(*opts.values) = append((*opts.values), value)
+	return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range *opts.values {
+		if k == key {
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range *opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values of the slice.
+func (opts *ListOpts) GetAll() []string {
+	return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+	v := *opts.values
+	if v == nil {
+		return make([]string, 0)
+	}
+	return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range *opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+	return len((*opts.values))
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+// Set validates the input value with the validator, if one is set, and adds
+// it to the internal map, splitting on the first '='.
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
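+// A minimal usage sketch (values are illustrative):
+//
+//	labels := NewMapOpts(nil, ValidateLabel)
+//	_ = labels.Set("com.example.release=stable") // stored as key "com.example.release", value "stable"
+//	m := labels.GetAll()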
+func (opts *MapOpts) GetAll() map[string]string {
+	return opts.values
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+	if values == nil {
+		values = make(map[string]string)
+	}
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error.
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
+	}
+	return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates a domain for the resolvconf search configuration.
+// A zero-length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+	if val = strings.Trim(val, " "); val == "." {
+		return val, nil
+	}
+	return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+	if alphaRegexp.FindString(val) == "" {
+		return "", fmt.Errorf("%s is not a valid domain", val)
+	}
+	ns := domainRegexp.FindSubmatch([]byte(val))
+	if len(ns) > 0 && len(ns[1]) < 255 {
+		return string(ns[1]), nil
+	}
+	return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form key=value.
+func ValidateLabel(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("bad attribute format: %s", val)
+	}
+	return val, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_unix.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 00000000..f1ce844a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 00000000..2a9e2be7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
+// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP4. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 00000000..7307d969
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 00000000..1281683e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1049 @@
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/system"
+)
+
+type (
+	// Archive is a type of io.ReadCloser, combining the Read and Close interfaces.
+	Archive io.ReadCloser
+	// Reader is a type of io.Reader.
+	Reader io.Reader
+	// Compression represents whether, and with which algorithm, a stream is compressed.
+	Compression int
+	// TarChownOptions wraps the chown options UID and GID.
+	TarChownOptions struct {
+		UID, GID int
+	}
+	// TarOptions wraps the tar options.
+	TarOptions struct {
+		IncludeFiles     []string
+		ExcludePatterns  []string
+		Compression      Compression
+		NoLchown         bool
+		UIDMaps          []idtools.IDMap
+		GIDMaps          []idtools.IDMap
+		ChownOpts        *TarChownOptions
+		IncludeSourceDir bool
+		// When unpacking, specifies whether overwriting a directory with a
+		// non-directory is allowed and vice versa.
+		NoOverwriteDirNonDir bool
+		// For each include when creating an archive, the included name will be
+		// replaced with the matching name from this map.
+		RebaseNames map[string]string
+	}
+
+	// Archiver allows the reuse of most utility functions of this package
+	// with a pluggable Untar function. Also, to facilitate the passing of
+	// specific id mappings for untar, an archiver can be created with maps
+	// which will then be passed to Untar operations.
+	Archiver struct {
+		Untar   func(io.Reader, string, *TarOptions) error
+		UIDMaps []idtools.IDMap
+		GIDMaps []idtools.IDMap
+	}
+
+	// breakoutError is used to differentiate errors related to breaking out.
+	// When testing archive breakout in the unit tests, this error is expected
+	// in order for the test to pass.
+	breakoutError error
+)
+
+var (
+	// ErrNotImplemented is the error returned when a function is not implemented.
+	ErrNotImplemented = errors.New("Function not implemented")
+	defaultArchiver   = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
+)
+
+const (
+	// HeaderSize is the size in bytes of a tar header.
+	HeaderSize = 512
+)
+
+const (
+	// Uncompressed represents an uncompressed stream.
+	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
+	Bzip2
+	// Gzip is the gzip compression algorithm.
+	Gzip
+	// Xz is the xz compression algorithm.
+	Xz
+)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
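+// For example (a minimal sketch; the three bytes are the registered gzip
+// magic values from DetectCompression below):
+//
+//	IsArchive([]byte{0x1F, 0x8B, 0x08}) // true: detected as Gzip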
+func IsArchive(header []byte) bool {
+	compression := DetectCompression(header)
+	if compression != Uncompressed {
+		return true
+	}
+	r := tar.NewReader(bytes.NewBuffer(header))
+	_, err := r.Next()
+	return err == nil
+}
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+	file, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Bzip2: {0x42, 0x5A, 0x68},
+		Gzip:  {0x1F, 0x8B, 0x08},
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+	} {
+		if len(source) < len(m) {
+			logrus.Debugf("Len too short")
+			continue
+		}
+		if bytes.Compare(m, source[:len(m)]) == 0 {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+	p := pools.BufioReader32KPool
+	buf := p.Get(archive)
+	bs, err := buf.Peek(10)
+	if err != nil && err != io.EOF {
+		// Note: we'll ignore any io.EOF error because there are some odd
+		// cases where the layer.tar file will be empty (zero bytes) and
+		// that results in an io.EOF from the Peek() call. So, in those
+		// cases we'll just treat it as a non-compressed stream and
+		// that means just create an empty layer.
+		// See Issue 18170
+		return nil, err
+	}
+
+	compression := DetectCompression(bs)
+	switch compression {
+	case Uncompressed:
+		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+		return readBufWrapper, nil
+	case Gzip:
+		gzReader, err := gzip.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+		return readBufWrapper, nil
+	case Bzip2:
+		bz2Reader := bzip2.NewReader(buf)
+		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+		return readBufWrapper, nil
+	case Xz:
+		xzReader, chdone, err := xzDecompress(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+			<-chdone
+			return readBufWrapper.Close()
+		}), nil
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// CompressStream compresses data written to the returned WriteCloser into
+// dest with the specified compression algorithm.
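+// A minimal usage sketch (the path is illustrative):
+//
+//	f, _ := os.Create("/tmp/layer.tar.gz")
+//	w, _ := CompressStream(f, Gzip)
+//	// write the tar stream to w, then close w before closing f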
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. 
We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. 
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Debugf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
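+			// For example (illustrative path): srcPath "/tmp/foo.txt" becomes
+			// srcPath "/tmp" with IncludeFiles ["foo.txt"].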
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Debugf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + if !exceptions && f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
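+		// For example, an entry named "a/b/c.txt" gets "<dest>/a/b" created
+		// (via MkdirAll) before the file itself is written, even if the
+		// archive never listed the parent directories explicitly.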
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0777)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+		trBuf.Reset(tr)
+
+		// if the options contain uid & gid maps, convert header uid/gid
+		// entries using the maps such that lchown sets the proper mapped
+		// uid/gid after writing the file. We only perform this mapping if
+		// the file isn't already owned by the remapped root UID or GID, as
+		// that specific uid/gid has no mapping from container -> host, and
+		// those files already have the proper ownership for inside the
+		// container.
+		if hdr.Uid != remappedRootUID {
+			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if hdr.Gid != remappedRootGID {
+			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
+		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
+			return err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation inside them from modifying the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := system.MkdirAll(dst, 0755); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. 
It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is in an operating specific manner depending +// where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
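+// A minimal usage sketch (the stream name is illustrative):
+//
+//	tmp, err := NewTempArchive(layerStream, "") // "" uses the default temp dir
+//	// tmp.Size is known up front; reading tmp to EOF deletes the backing file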
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 00000000..fbc3bb8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,112 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
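+// On unix this is the identity function, since the Go APIs already report the
+// permission bits correctly; the Windows variant in archive_windows.go masks
+// the mode to 0755 and forces the execute bits (0111).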
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 00000000..5c3a1be3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\'
+	// in file names, it is mostly safe to replace; however, we must
+	// check just in case
+	if strings.Contains(p, "/") {
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+	}
+	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	perm &= 0755
+	// Add the x bit: make everything +x from windows
+	perm |= 0111
+
+	return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	// no notion of file ownership mapping yet on Windows
+	return 0, 0, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 00000000..81651c61
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,416 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change can be a modify, an add, or a delete.
+// This is used for layer diffs.
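+// For example (illustrative paths):
+//
+//	Change{Path: "/etc/hostname", Kind: ChangeModify} // String() prints "C /etc/hostname"
+//	Change{Path: "/var/log/old", Kind: ChangeDelete}  // String() prints "D /var/log/old"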
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, WhiteoutPrefix) { + originalFile := file[len(WhiteoutPrefix):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
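
Since FileInfo's fields are unexported, its tree can only be assembled inside package archive; a hypothetical in-package sketch (the "/etc/hosts" shape is invented) of how LookUp above descends the children maps:

```go
package archive

import "fmt"

// exampleLookUp is a hypothetical in-package helper, not part of this diff.
func exampleLookUp() {
	root := newRootFileInfo() // "/" with an empty children map (defined below)

	etc := &FileInfo{name: "etc", parent: root, children: map[string]*FileInfo{}}
	root.children["etc"] = etc

	hosts := &FileInfo{name: "hosts", parent: etc, children: map[string]*FileInfo{}}
	etc.children["hosts"] = hosts

	// LookUp splits on the path separator and walks the child maps element
	// by element; path() rebuilds the absolute path by walking parents.
	fmt.Println(root.LookUp("/etc/hosts").path()) // /etc/hosts
	fmt.Println(root.LookUp("/etc/missing"))      // <nil>
}
```
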
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 00000000..dee8b7c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,285 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. 
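
The ChangeDelete branch above never ships the deleted file; it writes an empty `.wh.`-prefixed marker instead. A standalone sketch of that naming rule (the path is invented; the real code also strips the leading separator via `whiteOut[1:]` before writing the tar header):

```go
package main

import (
	"fmt"
	"path/filepath"
)

const WhiteoutPrefix = ".wh." // defined in whiteouts.go later in this diff

// whiteoutName mirrors the ChangeDelete branch of ExportChanges above.
func whiteoutName(deleted string) string {
	dir := filepath.Dir(deleted)
	base := filepath.Base(deleted)
	return filepath.Join(dir, WhiteoutPrefix+base)
}

func main() {
	fmt.Println(whiteoutName("/etc/hosts")) // /etc/.wh.hosts
	// The tar entry name would then be "etc/.wh.hosts" (leading "/" stripped).
}
```
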
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
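
The merge described above is implemented by the loop that follows; a standalone sketch of the same two-pointer merge with invented data, showing how identical inode/device pairs are pruned:

```go
package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

// mergeNames reproduces the merge below: both inputs are sorted, and an
// entry present in both directories with the same inode on the same device
// is dropped, since it must be the identical, unchanged file.
func mergeNames(names1, names2 []nameIno, sameDevice bool) []string {
	var names []string
	ix1, ix2 := 0, 0
	for ix1 < len(names1) && ix2 < len(names2) {
		ni1, ni2 := names1[ix1], names2[ix2]
		switch {
		case ni1.name < ni2.name: // only in names1
			names = append(names, ni1.name)
			ix1++
		case ni1.name > ni2.name: // only in names2
			names = append(names, ni2.name)
			ix2++
		default: // in both: keep only if it may differ
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		}
	}
	for ; ix1 < len(names1); ix1++ {
		names = append(names, names1[ix1].name)
	}
	for ; ix2 < len(names2); ix2++ {
		names = append(names, names2[ix2].name)
	}
	return names
}

func main() {
	old := []nameIno{{"a", 1}, {"b", 2}, {"d", 4}}
	cur := []nameIno{{"a", 1}, {"b", 9}, {"c", 3}}
	fmt.Println(mergeNames(old, cur, true)) // [b c d]: "a" pruned, same inode
}
```
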
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 00000000..da70ed37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
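
collectFileInfoForChanges above walks both trees concurrently; a minimal sketch of just its coordination pattern (stand-in work, invented names), where the buffered channel lets an early error return without leaking the other goroutine:

```go
package main

import (
	"errors"
	"fmt"
)

func collectBoth(oldDir, newDir string) (string, string, error) {
	var oldRoot, newRoot string
	errs := make(chan error, 2) // buffered: a late sender never blocks

	go func() {
		oldRoot = "tree(" + oldDir + ")" // stand-in for collectFileInfo
		errs <- nil
	}()
	go func() {
		newRoot = "tree(" + newDir + ")"
		errs <- errors.New("simulated walk failure")
	}()

	// Wait for both results, returning the first error seen.
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return "", "", err
		}
	}
	return oldRoot, newRoot, nil
}

func main() {
	_, _, err := collectBoth("/old", "/new")
	fmt.Println(err) // simulated walk failure
}
```
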
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 00000000..3778b732 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return uint64(fi.Sys().(*syscall.Stat_t).Ino) +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 00000000..af94243f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.ModTime() != newStat.ModTime() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 00000000..e1fa73f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,458 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" +) + +// Errors used or returned by this file. 
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
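
The effect of these helpers is easiest to see on concrete paths; a sketch (Unix path semantics assumed, paths invented) using the exported functions above:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// filepath.Clean eats a trailing "/.", but whether the caller wrote
	// "dir" or "dir/." decides whether the directory itself or its contents
	// are copied, so the helper restores the suffix.
	orig := "/var/log/."
	cleaned := filepath.Clean(orig) // "/var/log"
	fmt.Println(archive.PreserveTrailingDotOrSeparator(cleaned, orig)) // /var/log/.

	dir, base := archive.SplitPathDirEntry("/var/log/.")
	fmt.Println(dir, base) // /var/log .
}
```
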
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between it's directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. 
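
A standalone sketch (invented paths) of the step above that re-bases a relative link target against the link's own parent directory. The vendored code uses system.IsAbs to stay Windows-aware; plain filepath.IsAbs stands in here for brevity:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// resolveLink mirrors the symlink-evaluation step above: a target that is
// not absolute is interpreted relative to the directory holding the link.
func resolveLink(linkPath, target string) string {
	if filepath.IsAbs(target) {
		return target
	}
	return filepath.Join(filepath.Dir(linkPath), target)
}

func main() {
	fmt.Println(resolveLink("/data/current", "releases/v2")) // /data/releases/v2
	fmt.Println(resolveLink("/data/current", "/srv/app"))    // /srv/app
}
```
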
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the + // given destination path. The source path MUST exist and the destination + // path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose + // entries should be sourced from srcInfo to dstPath. +func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides which real path needs to be copied, based on +// parameters such as whether to follow symbolic links. If followLink is true, +// resolvedPath is the link target of any symbolic link file; otherwise only +// symlinks in the parent directory are resolved, while a symbolic link as the +// final path element is returned as-is, without being resolved.
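
A usage sketch for RebaseArchiveEntries above, building a throwaway archive with Generate (defined later in this diff, in wrap.go) and listing the rewritten entry names:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	src, err := archive.Generate("old/a.txt", "hello") // one entry: old/a.txt
	if err != nil {
		panic(err)
	}

	rebased := archive.RebaseArchiveEntries(src, "old", "new")

	tr := tar.NewReader(rebased)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name) // new/a.txt
	}
}
```
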
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 00000000..e305b5e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 00000000..2b775b45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 00000000..1b08ad33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,279 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
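
The first thing UnpackLayer does with the joined path (see the check that immediately follows) is reject entries that would escape dest; a standalone sketch of that guard with invented paths:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// escapes reproduces the breakout guard below: after cleaning and joining,
// a relative path that still starts with "../" points outside dest.
func escapes(dest, entryName string) bool {
	path := filepath.Join(dest, filepath.Clean(entryName))
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return true // treat unresolvable paths as hostile
	}
	return strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
	fmt.Println(escapes("/unpack", "etc/passwd"))       // false
	fmt.Println(escapes("/unpack", "../../etc/passwd")) // true
}
```
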
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
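
A usage sketch for the ApplyLayer entry point documented above (declared just below); the destination path is invented:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Stream a (possibly compressed) layer diff from stdin onto a rootfs.
	size, err := archive.ApplyLayer("/some/rootfs", os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("unpacked %d bytes\n", size)
}
```
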
+func ApplyLayer(dest string, layer Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 00000000..cedd46a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + var err error + oldDir, err = ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes, nil, nil) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes\n", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 00000000..3448569b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 00000000..e85aac05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/whiteouts.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 00000000..d20478a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout.
If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 00000000..dfb335c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..a7814f5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. 
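
Untar above accepts identity, gzip, bzip2, or xz streams; a hypothetical caller sketch (file name and destination are invented; on Linux the unpack runs in a re-exec'd "docker-untar" child, which is why main gives reexec a chance to take over first, an assumption based on the entry-point comments below):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	if reexec.Init() { // in the re-exec'd child the registered func runs instead
		return
	}

	f, err := os.Open("layer.tar.gz") // hypothetical input; may be compressed
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// "/some/dest" is invented; untarHandler below creates it if missing.
	if err := chrootarchive.Untar(f, "/some/dest", &archive.TarOptions{NoLchown: true}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
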
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000..51a43f67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,91 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. 
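
The "docker-untar" entry point referenced above is registered with the reexec package elsewhere in this vendored tree; a minimal sketch (hypothetical entry-point name, assuming reexec's Register/Init/Command API) of the round trip that pattern relies on:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Registration must happen before reexec.Init runs in main.
	reexec.Register("my-helper", func() {
		fmt.Println("re-exec'd as", os.Args[0])
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // in the child, the registered function runs (and exits)
		return
	}
	cmd := reexec.Command("my-helper") // re-runs this binary under that name
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```
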
+func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + // read the options from the pipe passed in via ExtraFiles + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + flush(os.Stdin) + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshal the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + // write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` fails on the docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // blocked on the write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 00000000..0a500ed5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different from Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 00000000..94131a6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,19 @@ +package chrootarchive + +import "github.com/docker/docker/pkg/archive" + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer.
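+//
+// A minimal usage sketch (the destination path is hypothetical; `layer` is
+// any open stream in the standard layer format, compressed or not):
+//
+//	size, err := chrootarchive.ApplyLayer("/mnt/rootfs", layer)
+//	if err == nil {
+//		fmt.Printf("layer contents: %d bytes\n", size)
+//	}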
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 00000000..4196dd4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,118 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir = "" + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + flush(os.Stdout) + flush(os.Stdin) + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. 
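+ // The child wrote exactly one applyLayerResponse JSON object to its stdout
+ // (see applyLayer above) before exiting, so a single Decode suffices here.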
+ response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000..8e1830cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,44 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..49fcacce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) { + io.Copy(ioutil.Discard, r) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000..fa17c9bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..b5057ecd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,279 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' 
+} + +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns and returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns, and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on its own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm.
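+//
+// For illustration, with a Unix separator the translation behaves roughly as
+// (glob pattern on the left, derived regexp on the right):
+//
+//	*.go     => ^[^/]*\.go$                 (matches "a.go", not "dir/a.go")
+//	**/*.go  => ^((.*/)|([^/]*))[^/]*\.go$  (matches ".go" files at any depth)
+//	a?c      => ^a[^/]c$                    ("?" is one non-separator char)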
+func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since it's really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if strings.Index(".$", string(ch)) != -1 { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but needs to be escaped) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link must not be a file; an error is +// returned if it is.
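+//
+// For example (hypothetical paths), with /docker symlinked to /var/lib/docker:
+//
+//	dir, err := fileutils.ReadSymlinkedDirectory("/docker")
+//	// dir == "/var/lib/docker"; a non-directory target yields an error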
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 00000000..8154e83f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. 
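+//
+// For example (the config path is illustrative, not part of this package):
+//
+//	cfg := filepath.Join(homedir.Get(), ".docker", "config.json")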
+func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is the shortcut to the user's +// home directory in the native shell of the platform being run on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 00000000..d7dc4387 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns the *http.Response, +// erroring out on any status code at or above 400. +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)", e.g. docker/1.8.0-dev (windows). +func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 00000000..d5cf34e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the common MIME content types. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not to be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters.
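+//
+// For example:
+//
+//	ct, args, err := httputils.DetectContentType([]byte(`{"a": 1}`))
+//	// ct == "text/plain", args["charset"] == "utf-8": http.DetectContentType
+//	// does not sniff JSON, and mime.ParseMediaType splits off the parameters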
+func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..bebc8608 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + 
r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 00000000..a1301ee9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,195 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username { + // return the first entry for a user; ignores potential for multiple ranges per user + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..b57d6ef1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package idtools + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/system" +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll 
== true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..c9e3c937 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..c1eedff1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,155 @@ +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup +// useradd -M -u -s /bin/nologin -N -g +// addgroup --gid +// groupadd -g + +const baseUID int = 10000 +const baseGID int = 10000 +const idMAX int = 65534 + +var ( + userCommand string + groupCommand string + + cmdTemplates = map[string]string{ + "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", + "useradd": "-M -u %d -s /bin/false -N -g %s %s", + "addgroup": "--gid %d %s", + "groupadd": "-g %d %s", + } +) + +func init() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + if _, err := resolveBinary("addgroup"); err == nil { + groupCommand = "addgroup" + } else if _, err := resolveBinary("groupadd"); err == nil { + groupCommand = "groupadd" + } +} + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +// This new user's /etc/sub{uid,gid} ranges will be used for user namespace +// mapping ranges in containers. 
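+//
+// Illustrative call (requires root and a useradd/adduser binary on $PATH;
+// the username is an example, not mandated by this package):
+//
+//	uid, gid, err := idtools.AddNamespaceRangesUser("dockremap")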
+func AddNamespaceRangesUser(name string) (int, int, error) { + // Find unused uid, gid pair + uid, err := findUnusedUID(baseUID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) + } + gid, err := findUnusedGID(baseGID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) + } + + // First add the group that we will use + if err := addGroup(name, gid); err != nil { + return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) + } + // Add the user as a member of the group + if err := addUser(name, uid, name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + return uid, gid, nil +} + +func addUser(userName string, uid int, groupName string) error { + + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) + return execAddCmd(userCommand, args) +} + +func addGroup(groupName string, gid int) error { + + if groupCommand == "" { + return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") + } + args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) + // only error out if the error isn't that the group already exists + // if the group exists then our needs are already met + if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { + return err + } + return nil +} + +func execAddCmd(cmd, args string) error { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + out, err := execCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) + } + return nil +} + +func findUnusedUID(startUID int) (int, error) { + return findUnused("passwd", startUID) +} + +func findUnusedGID(startGID int) (int, error) { + return findUnused("group", startGID) +} + +func findUnused(file string, id int) (int, error) { + for { + cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) + cmd := exec.Command("sh", "-c", cmdStr) + if err := cmd.Run(); err != nil { + // if a non-zero return code occurs, then we know the ID was not found + // and is usable + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + if status.ExitStatus() == 1 { + //no match, we can use this ID + return id, nil + } + } + } + return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) + } + id++ + if id > idMAX { + return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..d98b354c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 00000000..e263c284 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,152 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +// ErrClosed is returned when Write is called on a closed BytesPipe. +var ErrClosed = errors.New("write to closed BytesPipe") + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf [][]byte // slice of byte-slices of buffered data + lastRead int // index in the first slice to a read point + bufLen int // length of data buffered over the slices + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe(buf []byte) *BytesPipe { + if cap(buf) == 0 { + buf = make([]byte, 0, 64) + } + bp := &BytesPipe{ + buf: [][]byte{buf[:0]}, + } + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + written := 0 + for { + if bp.closeErr != nil { + return written, ErrClosed + } + // write data to the last buffer + b := bp.buf[len(bp.buf)-1] + // copy data to the current empty allocated area + n := copy(b[len(b):cap(b)], p) + // increment buffered data length + bp.bufLen += n + // include written data in last buffer + bp.buf[len(bp.buf)-1] = b[:len(b)+n] + + written += n + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // block if too much data is still in the buffer + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + } + + // allocate slice that has twice the size of the last unless maximum reached + nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) + if nextCap > maxCap { + nextCap = maxCap + } + // add new byte slice to the buffers slice and continue writing + bp.buf = append(bp.buf, make([]byte, 0, nextCap)) + } + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. 
+func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +func (bp *BytesPipe) len() int { + return bp.bufLen - bp.lastRead +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.len() == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.len() == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } + for { + read := copy(p, bp.buf[0][bp.lastRead:]) + n += read + bp.lastRead += read + if bp.len() == 0 { + // we have read everything. reset to the beginning. + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = bp.buf[0][:0] + break + } + // break if everything was read + if len(p) == read { + break + } + // more buffered data and more asked. read from next slice. + p = p[read:] + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = nil // throw away old slice + bp.buf = bp.buf[1:] // switch to next + } + bp.wait.Broadcast() + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 00000000..0b04b0ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 00000000..0d2d76b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return 
the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. 
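+//
+// For example:
+//
+//	rs := ioutils.MultiReadSeeker(strings.NewReader("hello "), strings.NewReader("world"))
+//	rs.Seek(6, os.SEEK_SET)
+//	buf := make([]byte, 5)
+//	rs.Read(buf) // buf now holds "world"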
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 00000000..e73b02bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function; +// the function runs once, at end of file or when the reader is closed. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the reader and runs the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper and its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 00000000..3c88f29e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 00000000..c11d02b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + // allow or force the Go scheduler to switch context; without explicitly + // forcing this, it will hang when using the gccgo implementation + runtime.Gosched() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 00000000..1539ad21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 00000000..c258e5fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 00000000..2b35a266 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// WriteFlusher wraps the Write and Flush operations, ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the target's lifecycle has already ended. +type WriteFlusher struct { + mu sync.Mutex + w io.Writer + flusher http.Flusher + flushed bool + closed error + + // TODO(stevvooe): Use channel for closed instead, remove mutex.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 00000000..2b35a266
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+	mu      sync.Mutex
+	w       io.Writer
+	flusher http.Flusher
+	flushed bool
+	closed  error
+
+	// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
+	// channel will allow one to properly order the operations.
+}
+
+var errWriteFlusherClosed = errors.New("writeflusher: closed")
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+	if wf.closed != nil {
+		return 0, wf.closed
+	}
+
+	n, err = wf.w.Write(b)
+	wf.flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
+	wf.flush()
+}
+
+// flush the stream immediately without taking a lock. Used internally.
+func (wf *WriteFlusher) flush() {
+	if wf.closed != nil {
+		return
+	}
+
+	wf.flushed = true
+	wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed: it returns true if it
+// has, false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
+	return wf.flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
+	if wf.closed != nil {
+		return wf.closed
+	}
+
+	wf.closed = errWriteFlusherClosed
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var flusher http.Flusher
+	if f, ok := w.(http.Flusher); ok {
+		flusher = f
+	} else {
+		flusher = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: flusher}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 00000000..ccc7f9c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a no-op operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode()).
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
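// An illustrative use (editor's sketch, not upstream code): counting the
// bytes that json.Encoder writes, since Encode itself reports no count.
//
//	wc := NewWriteCounter(ioutil.Discard)
//	_ = json.NewEncoder(wc).Encode(map[string]string{"hello": "world"})
//	fmt.Println(wc.Count) // number of bytes the encoder wrote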
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog.go new file mode 100644 index 00000000..422e4bbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog.go @@ -0,0 +1,40 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 00000000..31b047e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,180 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. 
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + 
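	// All escaped segments have been written by the loop above; the tail
	// flush just emitted any remaining run of unescaped bytes, so the JSON
	// string can now be terminated with its closing quote.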
buf.WriteByte('"') +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 00000000..ff7aaf16 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,124 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/time_marshalling.go new file mode 100644 index 00000000..21173381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog + +import ( + "errors" + "time" +) + +const ( + // RFC3339NanoFixed is our own version of RFC339Nano because we want one + // that pads the nano seconds part with zeros to ensure + // the timestamps are aligned in the logs. 
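// For example (illustrative values): time.RFC3339Nano drops trailing zeros,
// so two timestamps can render with different widths, while this layout
// always prints nine fractional digits:
//
//	t.Format(time.RFC3339Nano) // "2016-01-02T15:04:05.1Z"
//	t.Format(RFC3339NanoFixed) // "2016-01-02T15:04:05.100000000Z"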
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastTimeMarshalJSON
+	JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastTimeMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON makes.
+func FastTimeMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 00000000..65cccbce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,221 @@
+package jsonmessage
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/go-units"
+)
+
+// JSONError wraps a concrete Code and Message. `Code` is an integer error
+// code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal.
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+	terminalFd uintptr
+	Current    int64 `json:"current,omitempty"`
+	Total      int64 `json:"total,omitempty"`
+	Start      int64 `json:"start,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+	var (
+		width       = 200
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+
+	ws, err := term.GetWinsize(p.terminalFd)
+	if err == nil {
+		width = int(ws.Width)
+	}
+
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	current := units.HumanSize(float64(p.Current))
+	if p.Total <= 0 {
+		return fmt.Sprintf("%8v", current)
+	}
+	total := units.HumanSize(float64(p.Total))
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negative gh#7136
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+
+	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+	if p.Current > p.Total {
+		// remove total display if the reported current is wonky.
+		numbersBox = fmt.Sprintf("%8v", current)
+	}
+
+	if p.Current > 0 && p.Start > 0 && percentage < 50 {
+		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
+		perEntry := fromStart / time.Duration(p.Current)
+		left := time.Duration(p.Total-p.Current) * perEntry
+		left = (left / time.Second) * time.Second
+
+		if width > 50 {
+			timeLeftBox = " " + left.String()
+		}
+	}
+	return pbBox + numbersBox + timeLeftBox
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it's from, the status, and the ID of the
+// message. It's used for docker events.
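// An illustrative wire sample (editor's sketch of a typical pull progress
// entry; values invented):
//
//	{"status":"Downloading","progressDetail":{"current":1536,"total":4096},
//	 "progress":"[==>      ] 1.536 kB/4.096 kB","id":"8dbd9e392a96"}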
+type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + // [2K = erase entire current line + fmt.Fprintf(out, "%c[2K\r", 27) + endl = "\r" + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } else { + diff = len(ids) - line + } + if isTerminal { + // NOTE: this appears to be necessary even if + // diff == 0. + // [{diff}A = move cursor up diff rows + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. 
We don't
+			// want progress entries from some previous
+			// operation to be updated (for example, pull -a
+			// with multiple tags).
+			ids = make(map[string]int)
+		}
+		err := jm.Display(out, isTerminal)
+		if jm.ID != "" && isTerminal {
+			// NOTE: this appears to be necessary even if
+			// diff == 0.
+			// [{diff}B = move cursor down diff rows
+			fmt.Fprintf(out, "%c[%dB", 27, diff)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
new file mode 100644
index 00000000..9b4f4a29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md
new file mode 100644
index 00000000..da00efa3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md
@@ -0,0 +1,40 @@
+Package mflag (aka multiple-flag) implements command-line flag parsing.
+It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/).
+
+It adds:
+
+* both short and long flag versions
+`./example -s red` `./example --string blue`
+
+* multiple names for the same option
+```
+$>./example -h
+Usage of example:
+  -s, --string="": a simple string
+```
+
+___
+It is very flexible on purpose, so you can do things like:
+```
+$>./example -h
+Usage of example:
+  -s, -string, --string="": a simple string
+```
+
+Or:
+```
+$>./example -h
+Usage of example:
+  -oldflag, --newflag="": a simple string
+```
+
+You can also hide some flags from the usage, so if we want only `--newflag`:
+```
+$>./example -h
+Usage of example:
+  --newflag="": a simple string
+$>./example -oldflag str
+str
+```
+
+See [example.go](example/example.go) for more details.
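A minimal program along these lines (an illustrative sketch; see example.go for the real thing):

```
package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag"
)

var str = flag.String([]string{"s", "-string"}, "", "a simple string")

func main() {
	flag.Parse()
	fmt.Println(*str)
}
```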
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go
new file mode 100644
index 00000000..2e766dd1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+var (
+	i        int
+	str      string
+	b, b2, h bool
+)
+
+func init() {
+	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
+	flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
+	flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
+	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
+	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
+	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage
+	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
+	flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
+	flag.Parse()
+}
+func main() {
+	if h {
+		flag.PrintDefaults()
+	} else {
+		fmt.Printf("s/#hidden/-string: %s\n", str)
+		fmt.Printf("b: %t\n", b)
+		fmt.Printf("-bool: %t\n", b2)
+		fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
+		fmt.Printf("ARGS: %v\n", flag.Args())
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
new file mode 100644
index 00000000..2ad299ac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
@@ -0,0 +1,1261 @@
+// Copyright 2014-2016 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mflag implements command-line flag parsing.
+//
+// Usage:
+//
+// Define flags using flag.String(), Bool(), Int(), etc.
+//
+// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
+//	import flag "github.com/docker/docker/pkg/mflag"
+//	var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
+// If you like, you can bind the flag to a variable using the Var() functions.
+//	var flagvar int
+//	func init() {
+//		// -flaghidden will work, but will be hidden from the usage
+//		flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
+//	}
+// Or you can create custom flags that satisfy the Value interface (with
+// pointer receivers) and couple them to flag parsing by
+//	flag.Var(&flagVal, []string{"name"}, "help message for flagname")
+// For such flags, the default value is just the initial value of the variable.
+//
+// You can also add "deprecated" flags; they are still usable, but are not shown
+// in the usage and will display a warning when you try to use them. `#` before
+// an option means this option is deprecated; if there is a following option
+// without `#` ahead, then that's the replacement; if not, it will just be removed:
+//	var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or
+// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`
+//	var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+// so you can only use `-f`.
+//
+// You can also group one-letter flags: if you declare
+//	var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+//	var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+// you will be able to use -vs or -sv.
+//
+// After all flags are defined, call
+//	flag.Parse()
+// to parse the command line into the defined flags.
+//
+// Flags may then be used directly. If you're using the flags themselves,
+// they are all pointers; if you bind to variables, they're values.
+//	fmt.Println("ip has value ", *ip)
+//	fmt.Println("flagvar has value ", flagvar)
+//
+// After parsing, the arguments after the flag are available as the
+// slice flag.Args() or individually as flag.Arg(i).
+// The arguments are indexed from 0 through flag.NArg()-1.
+//
+// Command line flag syntax:
+//	-flag
+//	-flag=x
+//	-flag="x"
+//	-flag='x'
+//	-flag x  // non-boolean flags only
+// One or two minus signs may be used; they are equivalent.
+// The last form is not permitted for boolean flags because the
+// meaning of the command
+//	cmd -x *
+// will change if there is a file called 0, false, etc. You must
+// use the -flag=false form to turn off a boolean flag.
+//
+// Flag parsing stops just before the first non-flag argument
+// ("-" is a non-flag argument) or after the terminator "--".
+//
+// Integer flags accept 1234, 0664, 0x1234 and may be negative.
+// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+// Duration flags accept any input valid for time.ParseDuration.
+//
+// The default set of command-line flags is controlled by
+// top-level functions. The FlagSet type allows one to define
+// independent sets of flags, such as to implement subcommands
+// in a command-line interface. The methods of FlagSet are
+// analogous to the top-level functions for the command-line
+// flag set.
+
+package mflag
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/pkg/homedir"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("flag: help requested") + +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint16 Value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Get() interface{} { return uint16(*i) } + +func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- 
time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +// ErrorHandling strategies available when a flag parsing error occurs +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + ShortUsage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use Out() accessor + nArgRequirements []nArgRequirement +} + +// A Flag represents the state of a flag. +type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list flagSlice + + // The sorted list is based on the first name, when flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n + if len(f.Names) == 1 { + list = append(list, fName) + continue + } + + found := false + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + sort.Sort(list) + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[nameMap[name]] + } + return result +} + +// Name returns the name of the FlagSet. 
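// An illustrative use of the Getter interface defined above (editor's
// sketch; flag names assumed):
//
//	fs := NewFlagSet("example", ContinueOnError)
//	fs.String([]string{"s", "-string"}, "", "a simple string")
//	fs.Parse([]string{"-s", "red"})
//	if g, ok := fs.Lookup("s").Value.(Getter); ok {
//		fmt.Println(g.Get().(string)) // "red"
//	}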
+func (fs *FlagSet) Name() string { + return fs.name +} + +// Out returns the destination for usage and error messages. +func (fs *FlagSet) Out() io.Writer { + if fs.output == nil { + return os.Stderr + } + return fs.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (fs *FlagSet) SetOutput(output io.Writer) { + fs.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (fs *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(fs.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (fs *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(fs.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (fs *FlagSet) Lookup(name string) *Flag { + return fs.formal[name] +} + +// IsSet indicates whether the specified flag is set in the given FlagSet +func (fs *FlagSet) IsSet(name string) bool { + return fs.actual[name] != nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// IsSet indicates whether the specified flag was specified at all on the cmd line. +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + +type nArgRequirementType int + +// Indicator used to pass to BadArgs function +const ( + Exact nArgRequirementType = iota + Max + Min +) + +type nArgRequirement struct { + Type nArgRequirementType + N int +} + +// Require adds a requirement about the number of arguments for the FlagSet. +// The first parameter can be Exact, Max, or Min to respectively specify the exact, +// the maximum, or the minimal number of arguments required. +// The actual check is done in FlagSet.CheckArgs(). +func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { + fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) +} + +// CheckArgs uses the requirements set by FlagSet.Require() to validate +// the number of arguments. If the requirements are not met, +// an error message string is returned. +func (fs *FlagSet) CheckArgs() (message string) { + for _, req := range fs.nArgRequirements { + var arguments string + if req.N == 1 { + arguments = "1 argument" + } else { + arguments = fmt.Sprintf("%d arguments", req.N) + } + + str := func(kind string) string { + return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) + } + + switch req.Type { + case Exact: + if fs.NArg() != req.N { + return str("") + } + case Max: + if fs.NArg() > req.N { + return str("a maximum of ") + } + case Min: + if fs.NArg() < req.N { + return str("a minimum of ") + } + } + } + return "" +} + +// Set sets the value of the named flag. 
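// An illustrative pairing of Require and CheckArgs (editor's sketch; the
// subcommand name and args are assumed):
//
//	fs := NewFlagSet("inspect", ContinueOnError)
//	fs.Require(Exact, 1)
//	if err := fs.Parse(args); err == nil {
//		if msg := fs.CheckArgs(); msg != "" {
//			fs.ReportError(msg, true)
//		}
//	}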
+func (fs *FlagSet) Set(name, value string) error { + flag, ok := fs.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if err := flag.Value.Set(value); err != nil { + return err + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(value string) bool { + switch value { + case "false": + return true + case "": + return true + case "0": + return true + } + return false +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (fs *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) + home := homedir.Get() + + // Don't substitute when HOME is / + if runtime.GOOS != "windows" && home == "/" { + home = "" + } + + // Add a blank line between cmd description and list of options + if fs.FlagCount() > 0 { + fmt.Fprintln(writer, "") + } + + fs.VisitAll(func(flag *Flag) { + names := []string{} + for _, name := range flag.Names { + if name[0] != '#' { + names = append(names, name) + } + } + if len(names) > 0 && len(flag.Usage) > 0 { + val := flag.DefValue + + if home != "" && strings.HasPrefix(val, home) { + val = homedir.GetShortcutString() + val[len(home):] + } + + if isZeroValue(val) { + format := " -%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -")) + } else { + format := " -%s=%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) + } + for _, line := range strings.Split(flag.Usage, "\n") { + fmt.Fprintln(writer, "\t", line) + } + } + }) + writer.Flush() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(fs *FlagSet) { + if fs.name == "" { + fmt.Fprintf(fs.Out(), "Usage:\n") + } else { + fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) + } + fs.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// ShortUsage prints to standard error a usage message documenting the standard command layout +// The function is a variable that may be changed to point to a custom function. +var ShortUsage = func() { + fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) +} + +// FlagCount returns the number of flags that have been defined. +func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } + +// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. +func (fs *FlagSet) FlagCountUndeprecated() int { + count := 0 + for _, flag := range sortFlags(fs.formal) { + for _, name := range flag.Names { + if name[0] != '#' { + count++ + break + } + } + } + return count +} + +// NFlag returns the number of flags that have been set. 
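// For reference, an illustrative rendering (invented flags) of the
// PrintDefaults output produced above; note that the "=value" part is
// omitted when the default is a zero value:
//
//	  -s, --string="red"   a simple string
//	  -q, --quiet          suppress output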
+func (fs *FlagSet) NFlag() int { return len(fs.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (fs *FlagSet) Arg(i int) string { + if i < 0 || i >= len(fs.args) { + return "" + } + return fs.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (fs *FlagSet) NArg() int { return len(fs.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (fs *FlagSet) Args() []string { return fs.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + fs.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + fs.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { + fs.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (fs *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + fs.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. 
+// The argument p points to an int64 variable in which to store the value of the flag. +func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + fs.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + fs.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + fs.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + fs.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + fs.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + fs.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { + fs.Var(newUint16Value(value, p), names, usage) +} + +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func Uint16Var(p *uint16, names []string, value uint16, usage string) { + CommandLine.Var(newUint16Value(value, p), names, usage) +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { + p := new(uint16) + fs.Uint16Var(p, names, value, usage) + return p +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func Uint16(names []string, value uint16, usage string) *uint16 { + return CommandLine.Uint16(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { + fs.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (fs *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + fs.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + fs.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. 
+func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + fs.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + fs.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + fs.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (fs *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := fs.formal[name] + if alreadythere { + var msg string + if fs.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) + } + fmt.Fprintln(fs.Out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if fs.formal == nil { + fs.formal = make(map[string]*Flag) + } + fs.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
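// An editor's sketch of the comma-separated example described above (the
// type and flag name are assumed, and the `flag` import alias is as in the
// package comment):
//
//	type csv []string
//
//	func (c *csv) String() string     { return strings.Join(*c, ",") }
//	func (c *csv) Set(v string) error { *c = strings.Split(v, ","); return nil }
//
//	var fields csv
//	flag.Var(&fields, []string{"-fields"}, "comma-separated list of fields")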
+func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (fs *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(fs.Out(), err) + if os.Args[0] == fs.name { + fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) + } else { + fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (fs *FlagSet) usage() { + if fs == CommandLine { + Usage() + } else if fs.Usage == nil { + defaultUsage(fs) + } else { + fs.Usage() + } +} + +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. + if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (fs *FlagSet) parseOne() (bool, string, error) { + if len(fs.args) == 0 { + return false, "", nil + } + s := fs.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, "", nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + fs.args = fs.args[1:] + return false, "", nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, "", fs.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + fs.args = fs.args[1:] + hasValue := false + value := "" + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + hasValue = true + name = name[:i] + } + + m := fs.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. + fs.usage() + return false, "", ErrHelp + } + if len(name) > 0 && name[0] == '-' { + return false, "", fs.failf("flag provided but not defined: -%s", name) + } + return false, name, ErrRetry + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. + if !hasValue && len(fs.args) > 0 { + // value is the next arg + hasValue = true + value, fs.args = fs.args[0], fs.args[1:] + } + if !hasValue { + return false, "", fs.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + for i, n := range flag.Names { + if n == fmt.Sprintf("#%s", name) { + replacement := "" + for j := i; j < len(flag.Names); j++ { + if flag.Names[j][0] != '#' { + replacement = flag.Names[j] + break + } + } + if replacement != "" { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. 
See usage.\n", name, replacement) + } else { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + } + } + } + return true, "", nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (fs *FlagSet) Parse(arguments []string) error { + fs.parsed = true + fs.args = arguments + for { + seen, name, err := fs.parseOne() + if seen { + continue + } + if err == nil { + break + } + if err == ErrRetry { + if len(name) > 1 { + err = nil + for _, letter := range strings.Split(name, "") { + fs.args = append([]string{"-" + letter}, fs.args...) + seen2, _, err2 := fs.parseOne() + if seen2 { + continue + } + if err2 != nil { + err = fs.failf("flag provided but not defined: -%s", name) + break + } + } + if err == nil { + continue + } + } else { + err = fs.failf("flag provided but not defined: -%s", name) + } + } + switch fs.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(125) + case PanicOnError: + panic(err) + } + } + return nil +} + +// ParseFlags is a utility function that adds a help flag if withHelp is true, +// calls fs.Parse(args) and prints a relevant error message if there are +// incorrect number of arguments. It returns error only if error handling is +// set to ContinueOnError and parsing fails. If error handling is set to +// ExitOnError, it's safe to ignore the return value. +func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { + var help *bool + if withHelp { + help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") + } + if err := fs.Parse(args); err != nil { + return err + } + if help != nil && *help { + fs.SetOutput(os.Stdout) + fs.Usage() + os.Exit(0) + } + if str := fs.CheckArgs(); str != "" { + fs.SetOutput(os.Stderr) + fs.ReportError(str, withHelp) + fs.ShortUsage() + os.Exit(1) + } + return nil +} + +// ReportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func (fs *FlagSet) ReportError(str string, withHelp bool) { + if withHelp { + if os.Args[0] == fs.Name() { + str += ".\nSee '" + os.Args[0] + " --help'" + } else { + str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" + } + } + fmt.Fprintf(fs.Out(), "docker: %s.\n", str) +} + +// Parsed reports whether fs.Parse has been called. +func (fs *FlagSet) Parsed() bool { + return fs.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. 
+// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { + fs.name = name + fs.errorHandling = errorHandling +} + +type mergeVal struct { + Value + key string + fset *FlagSet +} + +func (v mergeVal) Set(s string) error { + return v.fset.Set(v.key, s) +} + +func (v mergeVal) IsBoolFlag() bool { + if b, ok := v.Value.(boolFlag); ok { + return b.IsBoolFlag() + } + return false +} + +// Merge is a helper function that merges n FlagSets into a single dest FlagSet. +// In case of name collision between the flag sets it will apply +// the destination FlagSet's errorHandling behavior. +func Merge(dest *FlagSet, flagsets ...*FlagSet) error { + for _, fset := range flagsets { + for k, f := range fset.formal { + if _, ok := dest.formal[k]; ok { + var err error + if fset.name == "" { + err = fmt.Errorf("flag redefined: %s", k) + } else { + err = fmt.Errorf("%s flag redefined: %s", fset.name, k) + } + fmt.Fprintln(fset.Out(), err.Error()) + // Happens only if flags are declared with identical names + switch dest.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + newF := *f + newF.Value = mergeVal{f.Value, k, fset} + dest.formal[k] = &newF + } + } + return nil +} + +// IsEmpty reports if the FlagSet is actually empty. +func (fs *FlagSet) IsEmpty() bool { + return len(fs.actual) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..a21ba137 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,100 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +}
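A hedged sketch of comparing kernel versions with these helpers; ParseRelease and GetKernelVersion are defined just below, and the 3.10.0 floor is illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	// ParseRelease tolerates flavors such as "3.12.25-gentoo".
	min, _ := kernel.ParseRelease("3.10.0")
	cur, err := kernel.GetKernelVersion()
	if err != nil {
		panic(err)
	}
	if kernel.CompareKernelVersion(*cur, *min) < 0 {
		fmt.Printf("kernel %s is older than %s\n", cur, min)
	}
}
```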
+// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..85ca250c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,67 @@ +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Full kernel version string (e.g. the BuildLabEx registry value) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information.
+ if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0xFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..7d12fcbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is a passthrough for syscall.Utsname in order to make it portable to +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..79c66b32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 00000000..76e84f9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +}
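A hedged sketch of the Get/Put discipline these pools are built for (Get, Put, and the Copy convenience wrapper follow just below); the readAll helper is illustrative only:

```go
package example

import (
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/pools"
)

// readAll drains src through a pooled 32K bufio.Reader instead of
// allocating a fresh buffer on every call.
func readAll(src io.Reader) ([]byte, error) {
	buf := pools.BufioReader32KPool.Get(src) // buffered reader wrapping src
	defer pools.BufioReader32KPool.Put(buf)  // always return it to the pool
	return ioutil.ReadAll(buf)
}
```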
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progress.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 00000000..61315cb8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,73 @@ +package progress + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +}
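A hedged sketch of wiring a channel to this Output interface; ChanOutput and the Update convenience helper are defined just below:

```go
package example

import (
	"fmt"

	"github.com/docker/docker/pkg/progress"
)

func example() {
	ch := make(chan progress.Progress, 1)
	out := progress.ChanOutput(ch) // adapts the channel to the Output interface
	progress.Update(out, "layer1", "Downloading")
	p := <-ch
	fmt.Printf("%s: %s\n", p.ID, p.Action)
}
```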
+// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progressreader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 00000000..c39e2b69 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,59 @@ +package progress + +import ( + "io" +) + +// Reader is a Reader with a progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string +} + +// NewProgressReader creates a new Reader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) // 512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 00000000..dd52b908 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise
+// Go is a basic promise implementation: it wraps a function call in a goroutine +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go new file mode 100644 index 00000000..70de4d13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which is initialized with a NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copied from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns a math/rand.Source safe for concurrent use, initialized +// with a cryptographically random seed (falling back to the current +// unix-nano timestamp if crypto/rand fails) +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 00000000..45592ce8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox-style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000..c7f797a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go @@ -0,0 +1,23 @@ +// +build freebsd + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +}
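A hedged sketch of how the promise.Go helper shown above is typically consumed; the withTimeout wrapper and the five-second bound are illustrative:

```go
package example

import (
	"errors"
	"time"

	"github.com/docker/docker/pkg/promise"
)

// withTimeout runs work in the background via promise.Go and gives up
// after five seconds instead of blocking forever.
func withTimeout(work func() error) error {
	errCh := promise.Go(work)
	select {
	case err := <-errCh:
		return err
	case <-time.After(5 * time.Second):
		return errors.New("timed out")
	}
}
```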
+// Command returns *exec.Cmd with Path set to the current binary. +// For example if the current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 00000000..3c3a73a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd with Path set to the current binary. It also sets +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 00000000..ad4ea38e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, and FreeBSD. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 00000000..8d65e0ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd with Path set to the current binary. +// For example if the current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 00000000..ceb98d25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name. +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +}
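A hedged sketch of the Register/Init/Command round trip described in the README above; the "my-init" name is illustrative only (Init is defined just below):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs when the binary is re-invoked with argv[0] == "my-init".
	reexec.Register("my-init", func() {
		fmt.Println("running in the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were dispatched to an initializer above
	}
	cmd := reexec.Command("my-init") // re-exec self with a custom argv[0]
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```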
+// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md new file mode 100644 index 00000000..37a5098f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go new file mode 100644 index 00000000..02d2594e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go @@ -0,0 +1,71 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length ID. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + trimTo := shortLen + if len(id) < shortLen { + trimTo = len(id) + } + return id[:trimTo] +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated form as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique ID. +func GenerateRandomID() string { + return generateID(true) +} + +// GenerateNonCryptoID generates a unique ID without using cryptographically +// secure sources of randomness. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..34c4ea7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..9b4f4a29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 00000000..acf3f566 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,47 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 00000000..28831898 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 00000000..04e2de78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. 
+ +import ( + "syscall" + "unsafe" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 00000000..c14feb84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
+func IsAbs(path string) bool { + return filepath.IsAbs(path) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 00000000..16823d55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,82 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 00000000..bd23c4d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +)
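A hedged sketch using these platform-aware wrappers; the ensureDir helper is illustrative only:

```go
package example

import (
	"github.com/docker/docker/pkg/system"
)

// ensureDir leans on the platform-aware helpers above: on Windows,
// MkdirAll refuses to touch \\?\Volume{...} paths and IsAbs accepts
// \windows\system32 even without a drive letter.
func ensureDir(path string) error {
	if !system.IsAbs(path) {
		path = "/" + path
	}
	return system.MkdirAll(path, 0755)
}
```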
+// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist. +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 00000000..49e87eb4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that is not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 00000000..3b6e947e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 00000000..66731a96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,66 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// +// Throws an error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..82ddd30c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..d4664259 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 00000000..73958182 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +}
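A hedged sketch combining Mknod with the Mkdev helper defined just below; the makeNull helper is illustrative, and (1, 3) is the standard Linux major/minor pair for /dev/null:

```go
package example

import (
	"syscall"

	"github.com/docker/docker/pkg/system"
)

// makeNull creates a character device equivalent to /dev/null
// at the given path, packing the device number with Mkdev.
func makeNull(path string) error {
	dev := system.Mkdev(1, 3) // major 1, minor 3 => /dev/null on Linux
	return system.Mknod(path, syscall.S_IFCHR|0666, int(dev))
}
```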
+func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 00000000..2e863c02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 00000000..1b6cc9cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// DefaultPathEnv is a Unix-style list of directories to search for +// executables. Each directory is separated from the next by a colon +// (':') character. +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 00000000..09e7f89f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package system + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 00000000..087034c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc. about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns the user ID of the file's owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns the group ID of the file's owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's a special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 00000000..d0fb6f15 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.StatT type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Returns an error if the file does not exist. +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 00000000..8b1eded1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.StatT type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.StatT from a +// syscall.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file.
+// +// Returns an error if the file does not exist. +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_solaris.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 00000000..b01d08ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,17 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 00000000..c6075d4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd,!solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 00000000..39490c62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc. about a file. +type StatT struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return s.mode +} + +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime +} + +// IsDir returns whether the file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 00000000..f1497c58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall.
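+// The zero flags argument requests an immediate, regular unmount; on Linux a
+// lazy unmount would pass syscall.MNT_DETACH instead of 0.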
+func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 00000000..273aa234 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,36 @@ +package system + +import ( + "fmt" + "syscall" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() (OSVersion, error) { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + return osv, fmt.Errorf("Failed to call GetVersion()") + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv, nil +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. On Windows it is a no-op. +func Unmount(dest string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 00000000..c670fcd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 00000000..13f1de17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // Should not be called on the CLI code path. + return 0, ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go new file mode 100644 index 00000000..0a161975 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go @@ -0,0 +1,8 @@ +package system + +import "syscall" + +// LUtimesNano is not supported on the darwin platform. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..e2eac3b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change the access and modification times of the specified path. +// It is used for symlinks because syscall.UtimesNano does not currently support a NOFOLLOW flag.
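+// ts holds two timespecs: ts[0] is the access time and ts[1] is the
+// modification time, mirroring the times argument of utimensat(2) and lutimes(2).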
+func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 00000000..fc8a1aba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,26 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change the access and modification times of the specified path. +// It is used for symlinks because syscall.UtimesNano does not currently support a NOFOLLOW flag. +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000..50c3a043 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd,!darwin + +package system + +import "syscall" + +// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 00000000..d2e2c057 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,63 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It returns a nil slice and a nil error if the xattr is not set.
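+// For example, Lgetxattr(path, "security.capability") reads the file-capability
+// attribute without following symlinks.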
+func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..0114f222 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 00000000..b42983e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) 
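+ // Also note that removing inside the range shifts later entries left,
+ // so an immediately adjacent duplicate can survive a single call.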
+ // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 00000000..5abf5e7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through an interface +// so the actual name and sum cannot be meddled with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values in a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 00000000..4dc89bd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,294 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining deterministic accountability. It neither constrains the +// ordering or manipulation of the files during the creation or unpacking of the +// archive, nor includes additional metadata state about the file system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash.
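+//
+// The returned TarSum must be read to completion before calling Sum: the
+// per-file checksums are accumulated as the tar stream is consumed by Read.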
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output is gzip compressed. + tarSumVersion Version // this field is not exported so it cannot be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is the default TarSum hashing algorithm - "sha256".
+var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 00000000..77927ee7 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining deterministic accountability. It neither constrains the +ordering or manipulation of the files during the creation or unpacking of the +archive, nor includes additional metadata state about the file system attributes. + +## Intended Audience + +This document outlines the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form.
+ +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in the calculation +needed, and the ability to maintain backward compatibility. + +The general algorithm is described further in the 'Calculation' section. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`-prefixed pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating placeholder for the next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for an alternate cipher could include future-proofing the TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation takes into consideration the lifecycle of +the tar archive, in that the tar archive is not an immutable, permanent +artifact. Otherwise, options like relying on a known hashing cipher checksum of +the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation, items such as the order of files in the tar archive and timestamps +are subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive has its contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive are kept as reference for special ordering.
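+
+As a minimal, non-normative sketch of this per-file step in Go (assuming the
+ordered header key/value pairs have already been selected per the version rules
+below, and imports of `hash`, `io`, and `encoding/hex`):
+
+```go
+func fileSum(h hash.Hash, headers [][2]string, body io.Reader) (string, error) {
+	h.Reset()
+	for _, kv := range headers { // each written as "{key}{value}", no newline
+		h.Write([]byte(kv[0] + kv[1]))
+	}
+	if _, err := io.Copy(h, body); err != nil { // then the file payload
+		return "", err
+	}
+	return hex.EncodeToString(h.Sum(nil)), nil
+}
+```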
+ +#### Headers + +The following headers are read, in this +order (with the corresponding representation of each value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) are included after the above list. These xattr key/value pairs are first +sorted by key. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the ordered headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with the same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Because of this, the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgements + +Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the +TarSum calculation. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 00000000..28822868 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e.
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. 
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/ascii.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/ascii.go new file mode 100644 index 00000000..f5262bcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII lists the possible supported ASCII key sequences +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go new file mode 100644 index 00000000..1005084f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -0,0 +1,51 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include <termios.h> +import "C" + +// Termios is the Unix API for terminal I/O. +// It is a passthrough for syscall.Termios in order to make it portable to +// other platforms where it is not available or is handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored.
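+//
+// Note that cfmakeraw clears OPOST; it is deliberately OR-ed back in below so
+// that output post-processing (e.g. NL to CR-NL mapping) keeps working.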
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + newState.Oflag = newState.Oflag | C.OPOST + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go new file mode 100644 index 00000000..266039ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go @@ -0,0 +1,19 @@ +// +build !windows +// +build !linux !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 00000000..316c3990 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,132 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "io" + "os" + "os/signal" + "syscall" + "unsafe" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor.
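+//
+// For example (illustrative only):
+//	SetWinsize(os.Stdout.Fd(), &Winsize{Height: 24, Width: 80})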
+func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return nil + } + return err +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 00000000..798bc719 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,215 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + switch { + case os.Getenv("ConEmuANSI") == "ON": + // The ConEmu shell emulates ANSI well by default. + return os.Stdin, os.Stdout, os.Stderr + case os.Getenv("MSYSTEM") != "": + // MSYS (mingw) does not emulate ANSI well. + return windows.ConsoleStreams() + default: + if useNativeConsole() { + return os.Stdin, os.Stdout, os.Stderr + } + return windows.ConsoleStreams() + } +} + +// useNativeConsole determines if the docker client should use the built-in +// console which supports ANSI emulation, or fall back to the golang emulator +// (github.com/azure/go-ansiterm).
+func useNativeConsole() bool { + osv, err := system.GetOSVersion() + if err != nil { + return false + } + + // Native console is not available before major version 10 + if osv.MajorVersion < 10 { + return false + } + + // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later + if osv.Build < 10578 { + return false + } + + // Environment variable override + if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { + if e == "1" { + return true + } + return false + } + + // Get the handle to stdout + stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE) + if err != nil { + return false + } + + // Get the console mode from the console's stdout handle + var mode uint32 + if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil { + return false + } + + // Legacy mode does not have native ANSI emulation. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + const enableVirtualTerminalProcessing = 0x0004 + if mode&enableVirtualTerminalProcessing == 0 { + return false + } + + // TODO Windows (Post TP4). The native emulator still has issues which + // mean it shouldn't be enabled for everyone. Change this next line to true + // to change the default to "enable if available". In the meantime, users + // can still try it out by using USE_NATIVE_CONSOLE env variable. + return false +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windows.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + x: 0, + y: 0} + + // Note: GetWinsize is called frequently -- uncomment only for excessive details + // logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String()) + // logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y) + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windows.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + return &State{mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state.
+func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + mode := state.mode + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go new file mode 100644 index 00000000..480db900 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go new file mode 100644 index 00000000..ed843ad6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 00000000..22921b6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,47 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +// Termios is the Unix API for terminal I/O.
+type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 00000000..3bf2b2b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,257 @@ +// +build windows + +package windows + +import ( + "bytes" + "errors" + "fmt" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +func newAnsiReader(nFile int) *ansiReader { + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p.
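+// As with any io.Reader, a single call may return fewer bytes than requested; a caller needing an exact count can wrap the reader (a sketch for any io.Reader value r): +// +// buf := make([]byte, 8) +// if _, err := io.ReadFull(r, buf); err != nil { +// // handle the short read or error +// }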
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("Unexpected copy length encountered.") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
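+ // (A composite literal such as unsafe.Sizeof(winterm.INPUT_RECORD{}) would be an equivalent, more direct expression; both forms are evaluated at compile time.)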
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[winterm.WORD]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[winterm.WORD]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. + // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. + // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s. + // <Ctrl>-S Suspends printing on the screen (does not stop the program). + // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. + // <Ctrl>-E Quits current command and creates a core + + } + + // <Alt>+Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
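+// For example, an unmodified Delete key (empty modifier) renders "\x1B[3%s~" as "\x1B[3~"; the arrow keys additionally substitute the reader's configured escape sequence via arrowKeyMapPrefix.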
+func formatVirtualKey(key winterm.WORD, controlState winterm.DWORD, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState winterm.DWORD) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 00000000..9f3232c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,76 @@ +// +build windows + +package windows + +import ( + "io/ioutil" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +func newAnsiWriter(nFile int) *ansiWriter { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. 
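+// Writes are handed to the embedded ANSI parser, which turns recognized escape sequences into Windows Console API calls. A caller-side sketch (ConsoleStreams is defined in console.go below; the clear-screen sequence is only an example): +// +// _, stdout, _ := windows.ConsoleStreams() +// fmt.Fprint(stdout, "\x1b[2J") // interpreted as a console clear rather than written literally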
+func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 00000000..3711d988 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,61 @@ +// +build windows + +package windows + +import ( + "io" + "os" + "syscall" + + "github.com/Azure/go-ansiterm/winterm" +) + +// ConsoleStreams returns a wrapped version for each standard stream referencing a console, +// that handles ANSI character sequences. +func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + if IsConsole(os.Stdin.Fd()) { + stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if IsConsole(os.Stdout.Fd()) { + stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if IsConsole(os.Stderr.Fd()) { + stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return stdIn, stdOut, stdErr +} + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 00000000..bf4c7b50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windows diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 00000000..d9cb367d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 00000000..1137db51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. 
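+// A sketch of this package's intended use (the product names and versions are made up; AppendVersions is defined below): +// +// ua := useragent.AppendVersions("", +// useragent.VersionInfo{Name: "docker", Version: "1.10.0"}, +// useragent.VersionInfo{Name: "go", Version: runtime.Version()}) +// // e.g. "docker/1.10.0 go/go1.5.3"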
+type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends it to the base string. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is taken from the Name field, while +// the version is taken from the Version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go new file mode 100644 index 00000000..c001279f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go @@ -0,0 +1,68 @@ +package version + +import ( + "strconv" + "strings" +) + +// Version provides utility methods for comparing versions. +type Version string + +func (v Version) compareTo(other Version) int { + var ( + currTab = strings.Split(string(v), ".") + otherTab = strings.Split(string(other), ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// String returns the version string +func (v Version) String() string { + return string(v) +} + +// LessThan checks if a version is less than another +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 +} + +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/reference/reference.go b/Godeps/_workspace/src/github.com/docker/docker/reference/reference.go new file mode 100644 index 00000000..1e348247 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/reference/reference.go @@ -0,0 +1,191 @@ +package reference + +import ( + "fmt" + "strings" + + "github.com/docker/distribution/digest" + distreference 
"github.com/docker/distribution/reference" + "github.com/docker/docker/image/v1" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name = normalize(name) + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. 
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. +func normalize(name string) string { + host, remoteName := splitHostname(name) + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix) + } + return remoteName + } + return name +} + +func validateName(name string) error { + if err := v1.ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/reference/store.go b/Godeps/_workspace/src/github.com/docker/docker/reference/store.go new file mode 100644 index 00000000..91c5c2ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/reference/store.go @@ -0,0 +1,298 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref Named + ImageID image.ID +} + +// Store provides the set of methods which can operate on a tag store. 
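+// A minimal usage sketch (the JSON path, reference, and id value are made up; id is an image.ID): +// +// refStore, err := reference.NewReferenceStore("/var/lib/docker/image/aufs/repositories.json") +// ref, _ := reference.ParseNamed("ubuntu:latest") +// err = refStore.AddTag(ref, id, false) +// imageID, err := refStore.Get(ref)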
+type Store interface { + References(id image.ID) []Named + ReferencesByName(ref Named) []Association + AddTag(ref Named, id image.ID, force bool) error + AddDigest(ref Canonical, id image.ID, force bool) error + Delete(ref Named) (bool, error) + Get(ref Named) (image.ID, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[image.ID]map[string]Named +} + +// Repository maps tags to image IDs. The key is a stringified Reference, +// including the repository name. +type repository map[string]image.ID + +type lexicalRefs []Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references is serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[image.ID]map[string]Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref Named, id image.ID, force bool) error { + if _, isCanonical := ref.(Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(WithDefaultTag(ref), id, force) +} + +// AddDigest adds a digest reference to the store.
+func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error { + return store.addReference(ref, id, force) +} + +func (store *store) addReference(ref Named, id image.ID, force bool) error { + if ref.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]image.ID) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. +func (store *store) Delete(ref Named) (bool, error) { + ref = WithDefaultTag(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference, applying the default tag +// if none is given. +func (store *store) Get(ref Named) (image.ID, error) { + ref = WithDefaultTag(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given image ID. The slice +// will be nil if there are no references to this image ID. +func (store *store) References(id image.ID) []Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable object; handing out the internal map + // would let callers modify the store's state. + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil.
+func (store *store) ReferencesByName(ref Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ImageID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + + tempFilePath := store.jsonPath + ".tmp" + + if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { + return err + } + + if err := os.Rename(tempFilePath, store.jsonPath); err != nil { + return err + } + + return nil +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go b/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go new file mode 100644 index 00000000..7175598c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go @@ -0,0 +1,255 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// Login tries to register/login to the registry server. +func Login(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { + // Separates the v2 registry login logic from the v1 logic. + if registryEndpoint.Version == APIVersion2 { + return loginV2(authConfig, registryEndpoint, "" /* scope */) + } + return loginV1(authConfig, registryEndpoint) +} + +// loginV1 tries to register/login to the v1 registry server. +func loginV1(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { + var ( + status string + respBody []byte + err error + respStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) + + logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) + + if serverAddress == "" { + return "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + // to avoid sending the server address to the server it should be removed before being marshaled + authCopy := *authConfig + authCopy.ServerAddress = "" + + jsonBody, err := json.Marshal(authCopy) + if err != nil { + return "", fmt.Errorf("Config Error: %s", err) + } + + // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. 
+ b := strings.NewReader(string(jsonBody)) + resp1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) + if err != nil { + return "", fmt.Errorf("Server Error: %s", err) + } + defer resp1.Body.Close() + respStatusCode = resp1.StatusCode + respBody, err = ioutil.ReadAll(resp1.Body) + if err != nil { + return "", fmt.Errorf("Server Error: [%#v] %s", respStatusCode, err) + } + + if respStatusCode == 201 { + if loginAgainstOfficialIndex { + status = "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." + } else { + // *TODO: Use registry configuration to determine what this says, if anything? + status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." + } + } else if respStatusCode == 400 { + if string(respBody) == "\"Username or email already exists\"" { + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + return "Login Succeeded", nil + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == 403 { + if loginAgainstOfficialIndex { + return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == 500 { // Issue #14326 + logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", fmt.Errorf("Internal Server Error") + } + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) + } + return "", fmt.Errorf("Registration: %s", respBody) + + } else if respStatusCode == 401 { + // This case would happen with private registries where /v1/users is + // protected, so people can use `docker login` as an auth check. + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + return "Login Succeeded", nil + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else { + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) + } + } else { + return "", fmt.Errorf("Unexpected status code [%d] : %s", respStatusCode, respBody) + } + return status, nil +} + +// loginV2 tries to login to the v2 registry server. The given registry endpoint has been +// pinged or set up with a list of authorization challenges. Each of these challenges is +// tried until one of them succeeds. Currently supported challenge schemes are: +// HTTP Basic Authorization +// Token Authorization with a separate token issuing server +// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist.
For +// now, users should create their account through other means like directly from a web page +// served by the v2 registry service provider. Whether this will be supported in the future +// is to be determined. +func loginV2(authConfig *types.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) + var ( + err error + allErrors []error + ) + + for _, challenge := range registryEndpoint.AuthChallenges { + params := make(map[string]string, len(challenge.Parameters)+1) + for k, v := range challenge.Parameters { + params[k] = v + } + params["scope"] = scope + logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params) + + switch strings.ToLower(challenge.Scheme) { + case "basic": + err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint) + case "bearer": + err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint) + default: + // Unsupported challenge types are explicitly skipped. + err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) + } + + if err == nil { + return "Login Succeeded", nil + } + + logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) + + allErrors = append(allErrors, err) + } + + return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) +} + +func tryV2BasicAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { + req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) + if err != nil { + return err + } + + req.SetBasicAuth(authConfig.Username, authConfig.Password) + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return nil +} + +func tryV2TokenAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { + token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint) + if err != nil { + return err + } + + req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) + if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return nil +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + convertToHostname := func(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] + } + + // Maybe they have a legacy config file, we will iterate the keys 
converting + them to the new format and testing + for registry, ac := range authConfigs { + if configKey == convertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go b/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go new file mode 100644 index 00000000..e300d82a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go @@ -0,0 +1,150 @@ +package registry + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +type octetType byte + +// AuthorizationChallenge carries information +// from a WWW-Authenticate response header. +type AuthorizationChallenge struct { + Scheme string + Parameters map[string]string +} + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = <any 8-bit sequence of data> + // CHAR = <any US-ASCII character (octets 0 - 127)> + // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> + // CR = <US-ASCII CR, carriage return (13)> + // LF = <US-ASCII LF, linefeed (10)> + // SP = <US-ASCII SP, space (32)> + // HT = <US-ASCII HT, horizontal-tab (9)> + // <"> = <US-ASCII double-quote mark (34)> + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = <any OCTET except CTLs, but including LWS> + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1*<any CHAR except CTLs or separators> + // qdtext = <any TEXT except <">> + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +func parseAuthHeader(header http.Header) []*AuthorizationChallenge { + var challenges []*AuthorizationChallenge + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config.go 
b/Godeps/_workspace/src/github.com/docker/docker/registry/config.go new file mode 100644 index 00000000..ec8ec271 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/config.go @@ -0,0 +1,257 @@ +package registry + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// Options holds command line options. +type Options struct { + Mirrors opts.ListOpts + InsecureRegistries opts.ListOpts +} + +const ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexServer is the v1 registry server used for user auth + account creation + IndexServer = DefaultV1Registry + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // IndexServer = "https://registry-stage.hub.docker.com/v1/" +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = NewServiceConfig(nil) + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only = false +) + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + options.Mirrors = opts.NewListOpts(ValidateMirror) + cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) + options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) + cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) + cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, usageFn("Do not contact legacy registries")) +} + +// NewServiceConfig returns a new instance of ServiceConfig +func NewServiceConfig(options *Options) *registrytypes.ServiceConfig { + if options == nil { + options = &Options{ + Mirrors: opts.NewListOpts(nil), + InsecureRegistries: opts.NewListOpts(nil), + } + } + + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + options.InsecureRegistries.Set("127.0.0.0/8") + + config := ®istrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors.GetAll(), + } + // Split --insecure-registry into CIDR and registry-specific settings. 
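+ // For example, "10.0.0.0/8" parses as a CIDR below and is later matched against registry IPs by isSecureIndex, while "myregistry:5000" fails net.ParseCIDR and is recorded as an individual insecure index.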
+ for _, r := range options.InsecureRegistries.GetAll() { + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*registrytypes.NetIPNet)(ipnet)) + } else { + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = ®istrytypes.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return config +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *registrytypes.ServiceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + host, _, err := net.SplitHostPort(indexName) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = indexName + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. + for _, addr := range addrs { + for _, ipnet := range config.InsecureRegistryCIDRs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return false + } + } + } + + return true +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + if val == reference.LegacyDefaultHostname { + val = reference.DefaultHostname + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). 
Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoSchema(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *registrytypes.ServiceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := ®istrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. +func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *registrytypes.ServiceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, name.Hostname()) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(name.Name(), '/') + return &RepositoryInfo{name, index, official}, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config_unix.go b/Godeps/_workspace/src/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 00000000..df970181 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +package registry + +const ( + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = "https://index.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = "https://registry-1.docker.io" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. 
Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config_windows.go b/Godeps/_workspace/src/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 00000000..d01b2618 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,30 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" +) + +const ( + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = "https://registry-win-tp3.docker.io" + + // DefaultV2Registry is the URI of the default (official) v2 registry. + // This is the windows-specific endpoint. + // + // Currently it is a TEMPORARY link that allows Microsoft to continue + // development of Docker Engine for Windows. + DefaultV2Registry = "https://registry-win-tp3.docker.io" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go new file mode 100644 index 00000000..258a9c28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go @@ -0,0 +1,277 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// scans string for api version in the URL path. returns the trimmed address, if version found, string and API version. +func scanForAPIVersion(address string) (string, APIVersion) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + + for k, v := range apiVersions { + if apiVersionStr == v { + address = strings.Join(chunks[:len(chunks)-1], "/") + return address, k + } + } + + return address, APIVersionUnknown +} + +// NewEndpoint parses the given address to return a registry endpoint. 
v can be used to +// specify a specific endpoint version +func NewEndpoint(index *registrytypes.IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + endpoint, err := newEndpoint(GetAuthConfigKey(index), tlsConfig, metaHeaders) + if err != nil { + return nil, err + } + if v != APIVersionUnknown { + endpoint.Version = v + } + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) { + var ( + endpoint = new(Endpoint) + trimmedAddress string + err error + ) + + if !strings.HasPrefix(address, "http") { + address = "https://" + address + } + + endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify) + + trimmedAddress, endpoint.Version = scanForAPIVersion(address) + + if endpoint.URL, err = url.Parse(trimmedAddress); err != nil { + return nil, err + } + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...)) + return endpoint, nil +} + +// Endpoint stores basic information about a registry endpoint. +type Endpoint struct { + client *http.Client + URL *url.URL + Version APIVersion + IsSecure bool + AuthChallenges []*AuthorizationChallenge + URLBuilder *v2.URLBuilder +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *Endpoint) String() string { + return fmt.Sprintf("%s/v%d/", e.URL, e.Version) +} + +// VersionString returns a formatted string of this +// endpoint address using the given API Version. +func (e *Endpoint) VersionString(version APIVersion) string { + return fmt.Sprintf("%s/v%d/", e.URL, version) +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *Endpoint) Path(path string) string { + return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path) +} + +// Ping pings the remote endpoint with v2 and v1 pings to determine the API +// version. It returns a PingResult containing the discovered version. 
The +// PingResult also indicates whether the registry is standalone or not. +func (e *Endpoint) Ping() (PingResult, error) { + // The ping logic to use is determined by the registry endpoint version. + switch e.Version { + case APIVersion1: + return e.pingV1() + case APIVersion2: + return e.pingV2() + } + + // APIVersionUnknown + // We should try v2 first... + e.Version = APIVersion2 + regInfo, errV2 := e.pingV2() + if errV2 == nil { + return regInfo, nil + } + + // ... then fallback to v1. + e.Version = APIVersion1 + regInfo, errV1 := e.pingV1() + if errV1 == nil { + return regInfo, nil + } + + e.Version = APIVersionUnknown + return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1) +} + +func (e *Endpoint) pingV1() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} + +func (e *Endpoint) pingV2() (PingResult, error) { + logrus.Debugf("attempting v2 ping for registry endpoint %s", e) + + req, err := http.NewRequest("GET", e.Path(""), nil) + if err != nil { + return PingResult{}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{}, err + } + defer resp.Body.Close() + + // The endpoint may have multiple supported versions. + // Ensure it supports the v2 Registry API. 
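+ // For reference, a conforming v2 registry answers with a header such as + // Docker-Distribution-API-Version: registry/2.0 + // and a single header value may list several space-separated version names, + // which is why each value below is split with strings.Fields.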
+ var supportsV2 bool + +HeaderLoop: + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { + for _, versionName := range strings.Fields(supportedVersions) { + if versionName == "registry/2.0" { + supportsV2 = true + break HeaderLoop + } + } + } + + if !supportsV2 { + return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) + } + + if resp.StatusCode == http.StatusOK { + // It would seem that no authentication/authorization is required. + // So we don't need to parse/add any authorization schemes. + return PingResult{Standalone: true}, nil + } + + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + e.AuthChallenges = parseAuthHeader(resp.Header) + return PingResult{}, nil + } + + return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go b/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go new file mode 100644 index 00000000..e15f83ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go @@ -0,0 +1,68 @@ +package registry + +import ( + "strings" + + "github.com/docker/distribution/digest" +) + +// Reference represents a tag or digest within a repository +type Reference interface { + // HasDigest returns whether the reference has a verifiable + // content addressable reference which may be considered secure. + HasDigest() bool + + // ImageName returns an image name for the given repository + ImageName(string) string + + // Returns a string representation of the reference + String() string +} + +type tagReference struct { + tag string +} + +func (tr tagReference) HasDigest() bool { + return false +} + +func (tr tagReference) ImageName(repo string) string { + return repo + ":" + tr.tag +} + +func (tr tagReference) String() string { + return tr.tag +} + +type digestReference struct { + digest digest.Digest +} + +func (dr digestReference) HasDigest() bool { + return true +} + +func (dr digestReference) ImageName(repo string) string { + return repo + "@" + dr.String() +} + +func (dr digestReference) String() string { + return dr.digest.String() +} + +// ParseReference parses a reference into either a digest or tag reference +func ParseReference(ref string) Reference { + if strings.Contains(ref, ":") { + dgst, err := digest.ParseDigest(ref) + if err == nil { + return digestReference{digest: dgst} + } + } + return tagReference{tag: ref} +} + +// DigestReference creates a digest reference using a digest +func DigestReference(dgst digest.Digest) Reference { + return digestReference{digest: dgst} +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go b/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go new file mode 100644 index 00000000..643fa56e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go @@ -0,0 +1,255 @@ +// Package registry contains client primitives to interact with a remote Docker registry. 
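+// Broadly: Service resolves repository names to endpoints, Endpoint pings a +// registry and negotiates an API version, and Session issues the v1 wire +// calls over an authenticated transport.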
+package registry + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") + errLoginRequired = errors.New("Authentication is required.") +) + +// dockerUserAgent is the User-Agent the Docker client uses to identify itself. +// It is populated on init(), comprising version information of different components. +var dockerUserAgent string + +func init() { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: dockerversion.Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: dockerversion.GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUserAgent = useragent.AppendVersions("", httpVersion...) + + if runtime.GOOS != "linux" { + V2Only = true + } +} + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return &tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. 
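+// For illustration, a hypothetical host directory certs.d/myregistry:5000/ +// containing ca.crt, client.cert and client.key would contribute one extra +// root CA and one client certificate pair, per the suffix handling below.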
+func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + // TODO(dmcgowan): Copy system pool + tlsConfig.RootCAs = x509.NewCertPool() + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers that ensure requests have +// the User-Agent header set to dockerUserAgent and that metaHeaders +// are added. +func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{ + transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}), + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns a HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// ShouldV2Fallback returns true if this error is a reason to fall back to v1. +func ShouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. 
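+// Illustratively, wrapping a failure as ErrNoSupport{Err: err} lets +// ContinueOnError (below) unwrap it and decide whether the caller should +// fall back to the next endpoint.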
+type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// ContinueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func ContinueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return ContinueOnError(v[0]) + case ErrNoSupport: + return ContinueOnError(v.Err) + case errcode.Error: + return ShouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. +func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + var cfg = tlsconfig.ServerDefault + tlsConfig = &cfg + } + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service.go b/Godeps/_workspace/src/github.com/docker/docker/registry/service.go new file mode 100644 index 00000000..dbdf1731 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/service.go @@ -0,0 +1,188 @@ +package registry + +import ( + "crypto/tls" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// Service is a registry service. It tracks configuration data such as a list +// of mirrors. +type Service struct { + Config *registrytypes.ServiceConfig +} + +// NewService returns a new instance of Service ready to be +// installed into an engine. +func NewService(options *Options) *Service { + return &Service{ + Config: NewServiceConfig(options), + } +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(authConfig *types.AuthConfig) (string, error) { + addr := authConfig.ServerAddress + if addr == "" { + // Use the official registry address if not specified. 
+ addr = IndexServer + } + index, err := s.ResolveIndex(addr) + if err != nil { + return "", err + } + + endpointVersion := APIVersion(APIVersionUnknown) + if V2Only { + // Override the endpoint to only attempt a v2 ping + endpointVersion = APIVersion2 + } + + endpoint, err := NewEndpoint(index, nil, endpointVersion) + if err != nil { + return "", err + } + authConfig.ServerAddress = endpoint.String() + return Login(authConfig, endpoint) +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +func (s *Service) Search(term string, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { + if err := validateNoSchema(term); err != nil { + return nil, err + } + + indexName, remoteName := splitReposSearchTerm(term) + + index, err := newIndexInfo(s.Config, indexName) + if err != nil { + return nil, err + } + + // *TODO: Search multiple indexes. + endpoint, err := NewEndpoint(index, http.Header(headers), APIVersionUnknown) + if err != nil { + return nil, err + } + + r, err := NewSession(endpoint.client, authConfig, endpoint) + if err != nil { + return nil, err + } + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName) + } + return r.SearchRepositories(remoteName) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(s.Config, name) +} + +// ResolveIndex takes indexName and returns index info +func (s *Service) ResolveIndex(name string) (*registrytypes.IndexInfo, error) { + return newIndexInfo(s.Config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL string + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) { + return newEndpoint(e.URL, e.TLSConfig, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *Service) TLSConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.Config, hostname)) +} + +func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + return s.TLSConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates an list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. 
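+// A sketch of typical caller behavior, with tryPull standing in for a +// hypothetical per-endpoint attempt: +// +// for _, ep := range endpoints { +// if err := tryPull(ep); err != nil && ContinueOnError(err) { +// continue +// } +// break +// }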
+func (s *Service) LookupPullEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { + return s.lookupEndpoints(repoName) +} + +// LookupPushEndpoints creates an list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. +func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { + allEndpoints, err := s.lookupEndpoints(repoName) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(repoName) + if err != nil { + return nil, err + } + + if V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(repoName) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) + + return endpoints, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service_v1.go b/Godeps/_workspace/src/github.com/docker/docker/registry/service_v1.go new file mode 100644 index 00000000..340ce957 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/service_v1.go @@ -0,0 +1,56 @@ +package registry + +import ( + "fmt" + "strings" + + "github.com/docker/docker/reference" + "github.com/docker/go-connections/tlsconfig" +) + +func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { + var cfg = tlsconfig.ServerDefault + tlsConfig := &cfg + nameString := repoName.FullName() + if strings.HasPrefix(nameString, DefaultNamespace+"/") { + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV1Registry, + Version: APIVersion1, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + return endpoints, nil + } + + slashIndex := strings.IndexRune(nameString, '/') + if slashIndex <= 0 { + return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) + } + hostname := nameString[:slashIndex] + + tlsConfig, err = s.TLSConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: "https://" + hostname, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: "http://" + hostname, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service_v2.go b/Godeps/_workspace/src/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 00000000..f89326d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,74 @@ +package registry + +import ( + "fmt" + "strings" + + "github.com/docker/docker/reference" + "github.com/docker/go-connections/tlsconfig" +) + +func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { + var cfg = tlsconfig.ServerDefault + tlsConfig := &cfg + nameString := repoName.FullName() + if strings.HasPrefix(nameString, DefaultNamespace+"/") { + // v2 mirrors + for _, mirror := range s.Config.Mirrors { + mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) + if err != nil { + return nil, err + } + 
endpoints = append(endpoints, APIEndpoint{ + URL: mirror, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + slashIndex := strings.IndexRune(nameString, '/') + if slashIndex <= 0 { + return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) + } + hostname := nameString[:slashIndex] + + tlsConfig, err = s.TLSConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: "https://" + hostname, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: "http://" + hostname, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/session.go b/Godeps/_workspace/src/github.com/docker/docker/registry/session.go new file mode 100644 index 00000000..57acbc0c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/session.go @@ -0,0 +1,769 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. 
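+// A minimal illustrative construction, assuming a caller-supplied +// *types.AuthConfig named authConfig: +// +// client := &http.Client{Transport: AuthTransport(http.DefaultTransport, authConfig, true)} +// +// where true forces Basic auth on every request, as a private v1 registry expects.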
+func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes a HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *Endpoint) (r *Session, err error) { + r = &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } + + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. 
+ if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return nil, err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return nil, errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return r, nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
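+// The returned size is taken from the X-Docker-Size response header when +// present and reported as -1 otherwise.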
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + statusCode = 0 + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debugf("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debugf("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. 
It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), name.RemoteName()) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errLoginRequired + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. 
+ if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := 
json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. 
+// Remote has the format '<user>/<repo>' +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote.RemoteName(), suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + "Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errLoginRequired + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err 
!= nil { + return nil, err + } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string) (*registrytypes.SearchResults, error) { + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +// GetAuthConfig returns the authentication settings for a session +// TODO(tiborvass): remove this once registry client v2 is vendored +func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &types.AuthConfig{ + Username: r.authConfig.Username, + Password: password, + Email: r.authConfig.Email, + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/token.go b/Godeps/_workspace/src/github.com/docker/docker/registry/token.go new file mode 100644 index 00000000..d91bd455 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/token.go @@ -0,0 +1,81 @@ +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +type tokenResponse struct { + Token string `json:"token"` +} + +func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) { + realm, ok := params["realm"] + if !ok { + return "", errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + if realmURL.Scheme == "" { + if registryEndpoint.IsSecure { + realmURL.Scheme = "https" + } else { + realmURL.Scheme = "http" + } + } + + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", err + } + + reqParams := req.URL.Query() + service := params["service"] + scope := params["scope"] + + if service != "" { + reqParams.Add("service", service) + } + + for _, scopeField := range strings.Fields(scope) { + reqParams.Add("scope", scopeField) + } + + if username != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := registryEndpoint.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("token auth attempt for registry %s: %s request 
failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + decoder := json.NewDecoder(resp.Body) + + tr := new(tokenResponse) + if err = decoder.Decode(tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.Token == "" { + return "", errors.New("authorization server did not include a token in the response") + } + + return tr.Token, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/types.go b/Godeps/_workspace/src/github.com/docker/docker/registry/types.go new file mode 100644 index 00000000..ee88276e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/registry/types.go @@ -0,0 +1,70 @@ +package registry + +import ( + "github.com/docker/docker/reference" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// RepositoryData tracks the image list, list of endpoints, and list of tokens +// for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string + // Tokens is currently unused (remove it?) + Tokens []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in a HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +var apiVersions = map[APIVersion]string{ + 1: "v1", + 2: "v2", +} + +// API Version identifiers. +const ( + APIVersionUnknown = iota + APIVersion1 + APIVersion2 +) + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. 
+ Official bool +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 00000000..e3d9a64d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. 
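For context on the registry code vendored above: registry/token.go implements the client side of the registry token handshake. getToken takes the realm, service, and scope parameters from a WWW-Authenticate challenge, queries the realm URL with them (splitting a space-separated scope into repeated scope query parameters, and adding an account parameter plus HTTP Basic credentials when a username is supplied), and expects a JSON body of the form {"token": "..."}. What follows is a minimal, self-contained sketch of that same flow using only the Go standard library; it is not part of the vendored diff, and the fetchToken name and the placeholder realm/service/scope values are illustrative only.

    // Illustrative sketch, not part of the vendored code: the bearer-token
    // handshake performed by registry/token.go, reduced to stdlib calls.
    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
        "net/http"
        "net/url"
        "strings"
    )

    // fetchToken mirrors getToken: it queries the auth realm with the
    // service/scope parameters from the challenge, sends credentials via
    // Basic auth when present, and decodes the {"token": "..."} response.
    func fetchToken(realm, service, scope, username, password string) (string, error) {
        realmURL, err := url.Parse(realm)
        if err != nil {
            return "", fmt.Errorf("invalid realm: %s", err)
        }
        req, err := http.NewRequest("GET", realmURL.String(), nil)
        if err != nil {
            return "", err
        }
        q := req.URL.Query()
        if service != "" {
            q.Add("service", service)
        }
        // A challenge may carry several space-separated scopes; each one
        // becomes its own "scope" query parameter, as in the vendored code.
        for _, s := range strings.Fields(scope) {
            q.Add("scope", s)
        }
        if username != "" {
            q.Add("account", username)
            req.SetBasicAuth(username, password)
        }
        req.URL.RawQuery = q.Encode()

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return "", fmt.Errorf("token request failed with status %d", resp.StatusCode)
        }
        var tr struct {
            Token string `json:"token"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
            return "", fmt.Errorf("unable to decode token response: %s", err)
        }
        if tr.Token == "" {
            return "", errors.New("authorization server did not include a token")
        }
        return tr.Token, nil
    }

    func main() {
        // Realm, service, and scope normally come from the registry's 401
        // WWW-Authenticate header; these values are placeholders.
        token, err := fetchToken(
            "https://auth.docker.io/token",
            "registry.docker.io",
            "repository:library/busybox:pull",
            "", "")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("received token of length", len(token))
    }

One difference worth noting: the vendored getToken additionally defaults the realm scheme to https or http according to the endpoint's IsSecure flag when the challenge realm omits a scheme; the sketch above assumes a fully qualified realm URL.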
+ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE new file mode 100644 index 00000000..bc756ae3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE @@ -0,0 +1,21 @@ +Copyright 2012 SocialCode + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE new file mode 100644 index 00000000..9269338f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 00000000..f090cb42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE new file mode 100644 index 00000000..106569e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE new file mode 100644 index 00000000..004e77fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. 
+You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. 
We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE new file mode 100644 index 00000000..b5768f89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/engine-api/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/engine-api/LICENSE new file mode 100644 index 00000000..c157bff9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/engine-api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-connections/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-connections/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-units/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-units/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code new file mode 100644 index 00000000..9e4bd4db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. 
A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. 
Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. 
Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE new file mode 100644 index 00000000..6daf85e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE new file mode 100644 index 00000000..c8476ac0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sameer Naik + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE new file mode 100644 index 00000000..d92ae9ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2015, Docker Inc. +Copyright (c) 2014-2015 Prime Directive, Inc. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Prime Directive, Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE new file mode 100644 index 00000000..1aa91de6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2013 Tatsuo Kaniwa + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE new file mode 100644 index 00000000..545cf2d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE new file mode 100644 index 00000000..670d88fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke (), Google +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..1b1b1921 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md new file mode 100644 index 00000000..ac2c0646 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +* The text contents of this website are MPL 2.0 licensed. + +* The design contents of this website are proprietary and may not be reproduced + or reused in any way other than to run the Consul website locally. The license + for the design is owned solely by HashiCorp, Inc. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 00000000..ccae99f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. 
+ +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md new file mode 100644 index 00000000..36c29d7f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +* The text contents of this website are MPL 2.0 licensed. + +* The design contents of this website are proprietary and may not be reproduced + or reused in any way other than to run the Serf website locally. The license + for the design is owned solely by HashiCorp, Inc. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE new file mode 100644 index 00000000..68668029 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License new file mode 100644 index 00000000..6b7558b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License @@ -0,0 +1,23 @@ +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE new file mode 100644 index 00000000..ca458bb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE new file mode 100644 index 00000000..b8b569d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 00000000..35702b10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. 
+Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE new file mode 100644 index 00000000..5763fa7f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE new file mode 100644 index 00000000..ce25d13a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Miek Gieben. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Miek Gieben nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE new file mode 100644 index 00000000..f4c265cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE new file mode 100644 index 00000000..bc00498c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Samuel Stauffer +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE new file mode 100644 index 00000000..81cf60de --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2015 Matthew Heon +Copyright (c) 2015 Paul Moore +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +- Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE new file mode 100644 index 00000000..80dd96de --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE new file mode 100644 index 00000000..e50d398e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 The AUTHORS + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE new file mode 100644 index 00000000..14d60424 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE new file mode 100644 index 00000000..95a0f054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE new file mode 100644 index 00000000..968b4538 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE @@ -0,0 +1,14 @@ +Copyright (c) 2013 Vaughan Newton + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE new file mode 100644 index 00000000..8ba54919 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE new file mode 100644 index 00000000..9f64db85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE new file mode 100644 index 00000000..9f64db85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE new file mode 100644 index 00000000..f4988b45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE new file mode 100644 index 00000000..f21e5408 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE b/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE new file mode 100644 index 00000000..c157bff9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go new file mode 100644 index 00000000..6cd4c36a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go @@ -0,0 +1,11 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth"` + Email string `json:"email"` + ServerAddress string `json:"serveraddress,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go new file mode 100644 index 00000000..458a9c96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds a device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds a device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go new file mode 100644 index 00000000..81eb29c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go @@ -0,0 +1,236 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/image" + "github.com/docker/go-units" +) + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + ContainerID string + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + ContainerID string + RepositoryName string + Tag string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filter filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ContainerID string + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string +} + +// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct { + ContainerID string + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + ContainerID string + Path string + Content io.Reader + AllowOverwriteDirWithFile bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface implemented by connections +// that can close the write side of the stream. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes the write side of the hijacked connection. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent image.PullBehavior + IsolationLevel container.IsolationLevel + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + BuildArgs map[string]string + AuthConfigs map[string]AuthConfig + Context io.Reader +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + Parent string // Parent is the name of the image to pull + Tag string // Tag is the name to tag this image with + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) + RepositoryName string // RepositoryName is the name of the repository to import this image into + Message string // Message is the message to tag the image with + Tag string // Tag is the name to tag this image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + MatchName string + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct { + ImageID string // ImageID is the name of the image to pull + Tag string // Tag is the name of the tag to be pulled + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + ImageID string + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + Term string + RegistryAuth string +} + +// ImageTagOptions holds parameters to tag an image +type ImageTagOptions struct { + ImageID string + RepositoryName string + Tag string + Force bool +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + ID string + Height int + Width int +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +}
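The RegistryAuth fields above are opaque to these types; by Docker's X-Registry-Auth convention the value is a base64url-encoded JSON serialization of the AuthConfig struct vendored earlier. A minimal sketch of filling it in, assuming that convention and using placeholder credentials and image names (this program is illustrative, not part of the vendored code):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// Placeholder credentials; real callers would read these from config.
	auth := types.AuthConfig{Username: "user", Password: "secret"}
	buf, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	pull := types.ImagePullOptions{
		ImageID:      "busybox",
		Tag:          "latest",
		RegistryAuth: base64.URLEncoding.EncodeToString(buf),
	}
	fmt.Println(pull.ImageID, pull.Tag, len(pull.RegistryAuth) > 0)
}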
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go new file mode 100644 index 00000000..6874a037 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go @@ -0,0 +1,54 @@ +package types + +import ( + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + Container string // Name of the container (to execute in) + AttachStdin bool // Attach the standard input, making user interaction possible + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Cmd []string // Execution commands and args +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go new file mode 100644 index 00000000..b4e6205d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go @@ -0,0 +1,38 @@ +package container + +import ( + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container + AttachStdin bool // Attach the standard input, making user interaction possible + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports + PublishService string `json:",omitempty"` // Name of the network service exposed by the container + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd *strslice.StrSlice // Command to run when starting the container + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (eg. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint *strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined in the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go new file mode 100644 index 00000000..d2b7c0c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go @@ -0,0 +1,241 @@ +package container + +import ( + "strings" + + "github.com/docker/engine-api/types/blkiodev" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// NetworkMode represents the container network stack.
+type NetworkMode string + +// IsolationLevel represents the isolation level of a container. The supported +// values are platform specific +type IsolationLevel string + +// IsDefault indicates the default isolation level of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i IsolationLevel) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid stack of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its private pid stack. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's pid stack. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the pid stack is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy values to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + KernelMemory int64 // Kernel memory limit (in bytes) + Memory int64 // Memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on". +// Portable information *should* appear in Config.
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other containers + + // Applicable to UNIX platforms + CapAdd *strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop *strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS servers to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed ports for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only mode + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + ShmSize int64 // Total shm memory usage + + // Applicable to Windows + ConsoleSize [2]int // Initial console size + Isolation IsolationLevel // Isolation level of the container (eg default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources +}
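The mode types above (IpcMode, PidMode, UTSMode, NetworkMode) encode namespace choices as plain strings such as "host" or "container:<name>". A minimal usage sketch of their helpers, assuming the vendored package path; the container name "db" is a placeholder:

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/container"
)

func main() {
	// "container:db" shares another container's IPC namespace.
	ipc := container.IpcMode("container:db")
	fmt.Println(ipc.Valid(), ipc.IsContainer(), ipc.Container()) // true true db

	// "host" shares the host's PID namespace.
	pid := container.PidMode("host")
	fmt.Println(pid.Valid(), pid.IsHost(), pid.IsPrivate()) // true true false

	rp := container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
	fmt.Println(rp.IsOnFailure()) // true
}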
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go new file mode 100644 index 00000000..775e903f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go @@ -0,0 +1,87 @@ +// +build !windows + +package container + +import "strings" + +// IsValid indicates whether an isolation level is valid +func (i IsolationLevel) IsValid() bool { + return i.IsDefault() +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer is the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() +} + +// UserDefined returns the name of a user-created network. +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go new file mode 100644 index 00000000..0140c957 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go @@ -0,0 +1,85 @@ +package container + +import ( + "fmt" + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i IsolationLevel) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i IsolationLevel) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates whether an isolation level is valid +func (i IsolationLevel) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() NetworkMode { + return NetworkMode("default") +} + +// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } + return "" +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *Config, hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + switch mode := parts[0]; mode { + case "default", "none": + default: + return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + } + return nil +} + +// ValidateIsolationLevel performs platform specific validation of the +// isolation level in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func ValidateIsolationLevel(hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go new file mode 100644 index 00000000..c5987aaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go @@ -0,0 +1,38 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels; other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage, +// with data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go new file mode 100644 index 00000000..9c80b1ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go @@ -0,0 +1,257 @@ +// Package filters provides helper functions to parse and handle command line +// filters, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associated with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field match the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '1'} +// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if sources == nil || len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field match the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + // do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match the full name value to avoid O(N) regular expression matching + if fieldValues[source] { + return true + } + return false +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +}
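A hedged usage sketch of the filters package above, showing the Add/ToParam/FromParam round trip and the matching helpers; the filter names and values are placeholders, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("name", "web")

	// Serialize for transport to the daemon, then decode again.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	back, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}

	fmt.Println(back.MatchKVList("label", map[string]string{"env": "prod"})) // true
	fmt.Println(back.ExactMatch("name", "web"))                              // true
}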
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go new file mode 100644 index 00000000..d30df2ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go @@ -0,0 +1,41 @@ +package image + +import ( + "fmt" +) + +// PullBehavior can be one of: never, always, or missing +type PullBehavior int + +// PullBehavior can be one of: never, always, or missing +const ( + PullNever PullBehavior = iota + PullAlways + PullMissing +) + +// ParsePullBehavior validates and converts a string into a PullBehavior +func ParsePullBehavior(pullVal string) (PullBehavior, error) { + switch pullVal { + case "never": + return PullNever, nil + case "always": + return PullAlways, nil + case "missing", "": + return PullMissing, nil + } + return PullNever, fmt.Errorf("Invalid pull behavior '%s'", pullVal) +} + +// String returns a string representation of a PullBehavior +func (p PullBehavior) String() string { + switch p { + case PullNever: + return "never" + case PullAlways: + return "always" + case PullMissing: + return "missing" + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go new file mode 100644 index 00000000..48b21996 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go @@ -0,0 +1,52 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per-network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go new file mode 100644 index 00000000..4fcf986e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go @@ -0,0 +1,101 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs.
+ Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial indicates whether the result is an official repository or not + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsTrusted indicates whether the result is trusted + IsTrusted bool `json:"is_trusted"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection of search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go new file mode 100644 index 00000000..e0305a9e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go @@ -0,0 +1,68 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + Architectures []Arch `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Arch used for additional architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Syscall is used to match a syscall in Seccomp +type Syscall struct { + Name string `json:"name"` + Action Action `json:"action"` + Args []*Arg `json:"args"` +}
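The seccomp types above mirror the JSON profile format the daemon consumes. A hedged sketch of assembling and serializing a profile with them; the choice of dup2 and the allowed argument value are placeholders for illustration, not a recommended policy:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// A minimal profile: kill on any syscall except an explicitly
	// allowed one, with an argument constraint (first arg must equal 1).
	profile := types.Seccomp{
		DefaultAction: types.ActKill,
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX86},
		Syscalls: []*types.Syscall{
			{
				Name:   "dup2",
				Action: types.ActAllow,
				Args: []*types.Arg{
					{Index: 0, Value: 1, Op: types.OpEqualTo},
				},
			},
		},
	}
	buf, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}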
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go new file mode 100644 index 00000000..55081ae4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go @@ -0,0 +1,112 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores all CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of a container +type CPUStats struct { + CPUUsage CPUUsage `json:"cpu_usage"` + SystemUsage uint64 `json:"system_cpu_usage"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats"` + // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt"` + Limit uint64 `json:"limit"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// TODO Windows: This can be factored out +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores all IO service stats for data read and write +// TODO Windows: This can be factored out +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// NetworkStats aggregates all network stats of one container +// TODO Windows: This will require refactoring +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` +} + +// Stats is the ultimate struct aggregating all types of stats of one container +type Stats struct { + Read time.Time `json:"read"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + PidsStats PidsStats `json:"pids_stats,omitempty"` +} + +// StatsJSON wraps Stats and adds the per-interface network stats. +type StatsJSON struct { + Stats + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +}
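Stats carries both CPUStats and PreCPUStats so a client can compute a usage percentage between two samples. A hedged sketch of the delta calculation commonly applied to these fields (the docker CLI does essentially this); the sample numbers below are made up:

package main

import (
	"fmt"

	"github.com/docker/engine-api/types"
)

// cpuPercent computes the CPU usage percentage between the previous and
// current sample carried in a Stats value.
func cpuPercent(s types.Stats) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	if sysDelta <= 0.0 || cpuDelta < 0.0 {
		return 0.0
	}
	return cpuDelta / sysDelta * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
}

func main() {
	var s types.Stats
	s.PreCPUStats.CPUUsage.TotalUsage = 1e9
	s.CPUStats.CPUUsage.TotalUsage = 2e9
	s.PreCPUStats.SystemUsage = 1e10
	s.CPUStats.SystemUsage = 2e10
	s.CPUStats.CPUUsage.PercpuUsage = []uint64{0, 0, 0, 0} // 4 cores
	fmt.Printf("%.1f%%\n", cpuPercent(s))                  // 40.0%
}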
+func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + e.parts = p + return nil +} + +// Len returns the number of parts of the StrSlice. +func (e *StrSlice) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +// Slice gets the parts of the StrSlice as a slice of strings. +func (e *StrSlice) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +// ToString gets a space-separated string of all the parts. +func (e *StrSlice) ToString() string { + s := e.Slice() + if s == nil { + return "" + } + return strings.Join(s, " ") +} + +// New creates a StrSlice based on the specified parts (as strings). +func New(parts ...string) *StrSlice { + return &StrSlice{parts} +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go new file mode 100644 index 00000000..d3695ba7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse the given string as a golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as a string, otherwise it returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration.
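Before the implementation, a short usage sketch covering the three input forms the comment above describes; the import alias and sample values are illustrative, not part of the vendored file:

```
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/engine-api/types/time"
)

func main() {
	ref := time.Now()

	// A duration is subtracted from the reference time and
	// comes back as a unix-seconds string.
	fmt.Println(apitime.GetTimestamp("10m", ref))

	// An RFC3339 timestamp parses to "seconds.nanoseconds".
	fmt.Println(apitime.GetTimestamp("2006-01-02T15:04:05Z", ref)) // 1136214245.000000000

	// A plain unix timestamp is passed through unchanged.
	fmt.Println(apitime.GetTimestamp("1136214245", ref)) // 1136214245
}
```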
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset; subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339-like timestamp, otherwise assume a unix timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339-like timestamp but the parser failed with an error + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d" (i.e. time.Unix() and int64(time.Nanosecond())). +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// nanoseconds will be used to create a time variable.
For example: +// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns def as the seconds value if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go new file mode 100644 index 00000000..b573ff45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go @@ -0,0 +1,433 @@ +package types + +import ( + "os" + "time" + + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/registry" + "github.com/docker/go-connections/nat" +) + +// ContainerCreateResponse contains the information returned to a client on the +// creation of a new container. +type ContainerCreateResponse struct { + // ID is the ID of the created container. + ID string `json:"Id"` + + // Warnings are any warnings encountered during the creation of the container. + Warnings []string `json:"Warnings"` +} + +// ContainerExecCreateResponse contains response of Remote API: +// POST "/containers/{name:.*}/exec" +type ContainerExecCreateResponse struct { + // ID is the exec ID. + ID string `json:"Id"` +} + +// ContainerUpdateResponse contains response of Remote API: +// POST /containers/{name:.*}/update +type ContainerUpdateResponse struct { + // Warnings are any warnings encountered during the updating of the container.
+ Warnings []string `json:"Warnings"` +} + +// AuthResponse contains response of Remote API: +// POST "/auth" +type AuthResponse struct { + // Status is the authentication status + Status string `json:"Status"` +} + +// ContainerWaitResponse contains response of Remote API: +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// ContainerCommitResponse contains response of Remote API: +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// ContainerChange contains response of Remote API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Remote API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Remote API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// Image contains response of Remote API: +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// ImageInspect contains response of Remote API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData +} + +// Port stores open ports info of container +// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +// Container contains response of Remote API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings +} + +// CopyConfig contains request body of Remote API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
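A hedged sketch of consuming this type: the remote API delivers it base64-encoded as JSON in a response header, so decoding might look like the following (helper name and sample value are hypothetical):

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

// decodePathStat shows one plausible way to decode the stat header
// (base64-encoded JSON) returned by HEAD/GET /containers/{name}/archive.
func decodePathStat(header string) (*types.ContainerPathStat, error) {
	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return nil, err
	}
	var stat types.ContainerPathStat
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, err
	}
	return &stat, nil
}

func main() {
	// A hypothetical header value for {"name":"etc","size":4096}.
	h := base64.StdEncoding.EncodeToString([]byte(`{"name":"etc","size":4096}`))
	stat, err := decodePathStat(h)
	fmt.Println(stat.Name, stat.Size, err) // etc 4096 <nil>
}
```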
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerProcessList contains response of Remote API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Version contains response of Remote API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Info contains response of Remote API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + InitSha1 string + InitPath string + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string +} + +// PluginsInfo is a temp struct that holds the plugin names +// registered with the docker daemon.
It is used by the Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart. +// Config fields are part of ExecConfig in the runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// ContainerState stores a container's running state. +// It's part of ContainerJSONBase and is returned by the "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string +} + +// ContainerJSONBase contains response of Remote API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON extends ContainerJSONBase with Mounts, Config and NetworkSettings +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the API +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of a container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the two-release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + IPAddress string + IPPrefixLen int + IPv6Gateway string + MacAddress string +} + +// MountPoint represents a mount point configuration inside the container.
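Before the MountPoint type itself, a quick sketch of how these inspect types decode in practice; the response body below is trimmed and hypothetical:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// A trimmed-down, hypothetical body from GET /containers/{name}/json.
	body := []byte(`{"Id":"8dfafdbc3a40","Name":"/boring_feynman",
		"Mounts":[{"Source":"/data","Destination":"/var/lib/data",
		"Mode":"rw","RW":true}]}`)

	var c types.ContainerJSON
	if err := json.Unmarshal(body, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ID, c.Name) // 8dfafdbc3a40 /boring_feynman
	for _, m := range c.Mounts {
		fmt.Println(m.Source, "->", m.Destination, "rw:", m.RW)
	}
}
```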
+type MountPoint struct { + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation string +} + +// Volume represents the configuration of a volume for the remote API +type Volume struct { + Name string // Name is the name of the volume + Driver string // Driver is the Driver name used to create the volume + Mountpoint string // Mountpoint is the location on disk of the volume +} + +// VolumesListResponse contains the response for the remote API: +// GET "/volumes" +type VolumesListResponse struct { + Volumes []*Volume // Volumes is the list of volumes being returned + Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers +} + +// VolumeCreateRequest contains the request for the remote API: +// POST "/volumes/create" +type VolumeCreateRequest struct { + Name string // Name is the requested name of the volume + Driver string // Driver is the name of the driver that should be used to create the volume + DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + IPAM network.IPAM + Internal bool + Containers map[string]EndpointResource + Options map[string]string +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + Name string + CheckDuplicate bool + Driver string + IPAM network.IPAM + Internal bool + Options map[string]string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md new file mode 100644 index 00000000..76c516e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator.
We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code would be valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go new file mode 100644 index 00000000..4ed43358 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go new file mode 100644 index 00000000..ed800061 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20.
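Between the two legacy packages, a sketch of how a client might pick the matching type for a negotiated API version; the version comparison is deliberately naive and purely illustrative:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/versions/v1p19"
)

// inspectContainer decodes an inspect response into the type matching
// the negotiated API version; version handling here is illustrative.
func inspectContainer(body []byte, apiVersion string) (interface{}, error) {
	if apiVersion <= "1.19" { // naive string compare, for illustration only
		var old v1p19.ContainerJSON
		err := json.Unmarshal(body, &old)
		return old, err
	}
	var cur types.ContainerJSON
	err := json.Unmarshal(body, &cur)
	return cur, err
}

func main() {
	body := []byte(`{"Id":"8dfafdbc3a40","Volumes":{"/data":"/var/lib/data"}}`)
	v, err := inspectContainer(body, "1.19")
	fmt.Printf("%T %v\n", v, err) // v1p19.ContainerJSON <nil>
}
```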
+package v1p20 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE b/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 00000000..3d469165 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,223 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + if len(portStr) == 0 { + return 0 + } + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := PartParser(portSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIP = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = 
ParsePortRange(hostPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIP: rawIP, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + } + return exposedPorts, bindings, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 00000000..87205020 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,56 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 00000000..ce950171 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate. +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the ports so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respective mappings. Ports +// with an explicit HostPort will be placed first.
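A short usage sketch tying ParsePortSpecs and SortPortMap together; the port specs are illustrative:

```
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Parse a couple of specs in the ip:public:private/proto format.
	exposed, bindings, err := nat.ParsePortSpecs([]string{
		"127.0.0.1:8080:80/tcp",
		"9000:9000",
	})
	if err != nil {
		panic(err)
	}
	ports := make([]nat.Port, 0, len(exposed))
	for p := range exposed {
		ports = append(ports, p)
	}
	// Place ports with explicit host bindings (and higher numbers) first.
	nat.SortPortMap(ports, bindings)
	for _, p := range ports {
		fmt.Println(p, bindings[p])
	}
}
```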
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 00000000..c08e53a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,127 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// Options represents the information needed to create client and server TLS configurations. +type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. +var ServerDefault = tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, +} + +// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. +var ClientDefault = tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
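A minimal sketch of building a client-side *tls.Config from these Options; the certificate paths are hypothetical:

```
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Paths are hypothetical; Client() only loads the key pair when
	// both CertFile and KeyFile are set.
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/etc/docker/certs.d/myregistry/ca.crt",
		CertFile: "/etc/docker/certs.d/myregistry/client.cert",
		KeyFile:  "/etc/docker/certs.d/myregistry/client.key",
	})
	if err != nil {
		fmt.Println("tls config:", err)
		return
	}
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: cfg},
	}
	_ = client
}
```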
+func certPool(caFile string) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + certPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + s := certPool.Subjects() + subjects := make([]string, len(s)) + for i, subject := range s { + subjects[i] = string(subject) + } + logrus.Debugf("Trusting certs with subjects: %v", subjects) + return certPool, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + if options.CertFile != "" && options.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + } + + return &tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + return &tlsConfig, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 00000000..6b4c6a7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 00000000..ee22df47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 00000000..9ea86d78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 00000000..477be8b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/Godeps/_workspace/src/github.com/docker/go-units/README.md b/Godeps/_workspace/src/github.com/docker/go-units/README.md new file mode 100644 index 00000000..3ce4d79d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/README.md @@ -0,0 +1,18 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library for transforming human-friendly measurements into machine-friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/circle.yml b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml new file mode 100644 index 00000000..9043b354 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . 
| tee /dev/stderr)" diff --git a/Godeps/_workspace/src/github.com/docker/go-units/duration.go b/Godeps/_workspace/src/github.com/docker/go-units/duration.go new file mode 100644 index 00000000..c219a8a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/duration.go @@ -0,0 +1,33 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/size.go b/Godeps/_workspace/src/github.com/docker/go-units/size.go new file mode 100644 index 00000000..3b59daff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/size.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. 
+// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go new file mode 100644 index 00000000..5ac7fd82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
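+// +// A short usage sketch (illustrative values; error handling elided): +// +// u, _ := ParseUlimit("nofile=1024:2048") // Ulimit{Name: "nofile", Soft: 1024, Hard: 2048} +// r, _ := u.GetRlimit() // Rlimit{Type: 7, Soft: 1024, Hard: 2048}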
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 00000000..9768175f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md new file mode 100644 index 00000000..8e7db381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/README.md @@ -0,0 +1,18 @@ +# libtrust + +Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons. 
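+ +## Example + +A minimal sketch of generating an identity key and a self-signed TLS server certificate with the functions defined later in this package (imports of `net`, `fmt`, and `libtrust` elided; error handling and values are illustrative): + + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + cert, err := libtrust.GenerateSelfSignedServerCert( + key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) + if err != nil { + panic(err) + } + fmt.Println(cert.Subject.CommonName) // prints the key's KeyID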
+ diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. 
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
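+// +// When serialized via MarshalJSON (below), a key of this type produces a JWK +// object of roughly this shape (illustrative values): +// +// {"kty": "EC", "kid": "ABCD:...", "crv": "P-256", "x": "...", "y": "..."}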
+type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey. +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) 
+ yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. 
+func (k *ecPrivateKey) PublicKey() PublicKey {
+	return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	// The given hashID is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded SEC1 format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
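For orientation, here is a minimal sketch of how the EC key API above fits together. It is illustrative only, not part of the vendored diff, and assumes the package is imported from github.com/docker/libtrust:

```
package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Generate a new P-256 key pair; Sign selects ES256 automatically.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key ID:", key.KeyID())

	data := []byte("hello, libtrust")
	// The hash argument is only a hint; EC keys always use the hash tied
	// to their curve (SHA-256 for P-256), as the Sign comment above notes.
	sig, alg, err := key.Sign(bytes.NewReader(data), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// Verify with the corresponding public key and the returned alg ("ES256").
	if err := key.PublicKey().Verify(bytes.NewReader(data), alg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified with", alg)
}
```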
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go
new file mode 100644
index 00000000..5b2b4fca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				// Stop after the first match so a key matching several
+				// patterns is only appended once.
+				break
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
new file mode 100644
index 00000000..a2df787d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 00000000..cb2ca9a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+	var dataMap map[string]interface{}
+	err := json.Unmarshal(content, &dataMap)
+	if err != nil {
+		return nil, err
+	}
+
+	js := newJSONSignature()
+	js.indent = detectJSONIndent(content)
+
+	js.payload = joseBase64UrlEncode(content)
+
+	// Find trailing } and whitespace, put in protected header
+	closeIndex := bytes.LastIndexFunc(content, notSpace)
+	if content[closeIndex] != '}' {
+		return nil, ErrInvalidJSONContent
+	}
+	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+	if content[lastRuneIndex] == ',' {
+		return nil, ErrInvalidJSONContent
+	}
+	js.formatLength = lastRuneIndex + 1
+	js.formatTail = content[js.formatLength:]
+
+	if len(signatures) > 0 {
+		for _, signature := range signatures {
+			var parsedJSig jsParsedSignature
+
+			if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+				return nil, err
+			}
+
+			// TODO(stevvooe): A lot of the code below is repeated in
+			// ParseJWS. It will require more refactoring to fix that.
+			jsig := jsSignature{
+				Header: jsHeader{
+					Algorithm: parsedJSig.Header.Algorithm,
+				},
+				Signature: parsedJSig.Signature,
+				Protected: parsedJSig.Protected,
+			}
+
+			if parsedJSig.Header.Chain != nil {
+				jsig.Header.Chain = parsedJSig.Header.Chain
+			}
+
+			if parsedJSig.Header.JWK != nil {
+				publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+				if err != nil {
+					return nil, err
+				}
+				jsig.Header.JWK = publicKey
+			}
+
+			js.signatures = append(js.signatures, jsig)
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+	switch content.(type) {
+	case map[string]interface{}:
+	case struct{}:
+	default:
+		return nil, errors.New("invalid data type")
+	}
+
+	js := newJSONSignature()
+	js.indent = "   "
+
+	payload, err := json.MarshalIndent(content, "", js.indent)
+	if err != nil {
+		return nil, err
+	}
+	js.payload = joseBase64UrlEncode(payload)
+
+	// Remove '\n}' from formatted section, put in protected header
+	js.formatLength = len(payload) - 2
+	js.formatTail = payload[js.formatLength:]
+
+	return js, nil
+}
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+	value, ok := m[key]
+	if !ok {
+		return 0, false
+	}
+	switch v := value.(type) {
+	case int:
+		return v, true
+	case float64:
+		return int(v), true
+	default:
+		return 0, false
+	}
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+	value, ok := m[key]
+	if !ok {
+		return "", false
+	}
+	v, ok = value.(string)
+	return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSON signature. If the signatures are missing the format information
+// an error is returned. The formatted signature must have been created
+// by PrettySignature.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var sb [][]byte
+	for _, jsig := range js.signatures {
+		p, err := json.Marshal(jsig)
+		if err != nil {
+			return nil, err
+		}
+
+		sb = append(sb, p)
+	}
+
+	return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+	merged := js.signatures
+	for _, other := range others {
+		if js.payload != other.payload {
+			return fmt.Errorf("payloads differ from merge target")
+		}
+		merged = append(merged, other.signatures...)
+	}
+
+	js.signatures = merged
+	return nil
+}
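The jsonsign.go pieces above (NewJSONSignature, Sign, PrettySignature, ParsePrettySignature, Verify) fit together roughly as follows; a minimal sketch, not part of the vendored code, assuming the github.com/docker/libtrust import path:

```
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	content := []byte(`{
   "name": "example",
   "tag": "latest"
}`)

	// Wrap the JSON payload, sign it, and embed the signatures back
	// into the document under the "signatures" key.
	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		log.Fatal(err)
	}
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(pretty))

	// Round-trip: parse the pretty form and verify the signature.
	parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		log.Fatal(err)
	}
	keys, err := parsed.Verify()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified, signed by", keys[0].KeyID())
}
```

The formatLength/formatTail bookkeeping in the protected header is what lets ParsePrettySignature reconstruct the exact signed byte sequence from the formatted document.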
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	//    ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go new file mode 100644 index 00000000..c526de54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", "   ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
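The key-file helpers above choose an encoding by file extension. A minimal, illustrative sketch of the save/load/append cycle (not part of the vendored code; the file names are placeholders):

```
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// A ".json" or ".jwk" suffix selects JWK encoding; anything else is PEM.
	if err := libtrust.SaveKey("trust_key.json", key); err != nil {
		log.Fatal(err)
	}
	loaded, err := libtrust.LoadKeyFile("trust_key.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded", loaded.KeyID())

	// Append the public half to a key set file and read the set back.
	if err := libtrust.AddKeySetFile("trusted_keys.json", key.PublicKey()); err != nil {
		log.Fatal(err)
	}
	keys, err := libtrust.LoadKeySetFile("trusted_keys.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("trusted keys:", len(keys))
}
```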
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 00000000..9a98ae35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// using the given private key as the manager's trust key.
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+	m := &ClientKeyManager{
+		key:        trustKey,
+		clientFile: clientFile,
+		clientDir:  clientDir,
+	}
+	if err := m.loadKeys(); err != nil {
+		return nil, err
+	}
+	// TODO Start watching file and directory
+
+	return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+	// Load authorized keys file
+	var clients []PublicKey
+	if c.clientFile != "" {
+		clients, err = LoadKeySetFile(c.clientFile)
+		if err != nil {
+			return fmt.Errorf("unable to load authorized keys: %s", err)
+		}
+	}
+
+	// Add clients from authorized keys directory
+	files, err := ioutil.ReadDir(c.clientDir)
+	if err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("unable to open authorized keys directory: %s", err)
+	}
+	for _, f := range files {
+		if !f.IsDir() {
+			publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+			if err != nil {
+				return fmt.Errorf("unable to load authorized key file: %s", err)
+			}
+			clients = append(clients, publicKey)
+		}
+	}
+
+	c.clientLock.Lock()
+	c.clients = clients
+	c.clientLock.Unlock()
+
+	return nil
+}
+
+// RegisterTLSConfig registers a tls configuration with the manager
+// so that any changes to the keys may be reflected in
+// the tls client CA pool
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+	c.clientLock.RLock()
+	certPool, err := GenerateCACertPool(c.key, c.clients)
+	if err != nil {
+		return fmt.Errorf("CA pool generation error: %s", err)
+	}
+	c.clientLock.RUnlock()
+
+	tlsConfig.ClientCAs = certPool
+
+	c.configLock.Lock()
+	c.configs = append(c.configs, tlsConfig)
+	c.configLock.Unlock()
+
+	return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+	if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+		return nil, err
+	}
+
+	// Generate cert
+	ips, domains, err := parseAddr(addr)
+	if err != nil {
+		return nil, err
+	}
+	// add domain that it expects clients to use
+	domains = append(domains, domain)
+	x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+	if err != nil {
+		return nil, fmt.Errorf("certificate generation error: %s", err)
+	}
+	tlsConfig.Certificates = []tls.Certificate{{
+		Certificate: [][]byte{x509Cert.Raw},
+		PrivateKey:  trustKey.CryptoPrivateKey(),
+		Leaf:        x509Cert,
+	}}
+
+	return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+	if err != nil {
+		return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+	}
+	tlsConfig.Certificates = []tls.Certificate{cert}
+
+	// Verify client certificates against a CA?
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
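As a rough sketch of how a server might wire the manager into an HTTPS listener; this is illustrative only, the file paths are hypothetical, and GenerateSelfSignedServerCert/GenerateCACertPool are assumed to come from elsewhere in libtrust:

```
package main

import (
	"log"
	"net/http"

	"github.com/docker/libtrust"
)

func main() {
	trustKey, err := libtrust.LoadKeyFile("server_key.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// An empty clientDir is tolerated by loadKeys (treated as not existing).
	clients, err := libtrust.NewClientKeyManager(trustKey, "trusted_clients.json", "")
	if err != nil {
		log.Fatal(err)
	}

	addr := "localhost:8888"
	tlsConfig, err := libtrust.NewIdentityAuthTLSConfig(trustKey, clients, addr, "localhost")
	if err != nil {
		log.Fatal(err)
	}

	server := &http.Server{
		Addr:      addr,
		TLSConfig: tlsConfig,
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello\n"))
		}),
	}
	// Certificates come from TLSConfig, so the file arguments stay empty.
	log.Fatal(server.ListenAndServeTLS("", ""))
}
```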
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 00000000..dac4cacf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data; return a non-nil error if it
+	// is invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
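Round-tripping a generated RSA key through the JWK serialization above can be sketched as follows (illustrative only, not part of the vendored code):

```
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Serialize the private key as a JWK ("kty", "n", "e", "d", "p", "q", ...).
	jwk, err := json.MarshalIndent(key, "", "    ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(jwk))

	// Parse it back; the key ID is derived from the public key material,
	// so it survives the round trip.
	parsed, err := libtrust.UnmarshalPrivateKeyJWK(jwk)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round-tripped key ID matches:", parsed.KeyID() == key.KeyID())
}
```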
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// here we choose not to accept it without the extra fields. Only the
+	// 'oth' field will be optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Take a pointer so the assignments below update the slice
+			// element rather than a discarded copy.
+			crtValue := &crtValues[i]
+			crtValue.Exp = otherFactorCRT
+			crtValue.Coeff = otherCrtCoeff
+			crtValue.R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go new file mode 100644 index 00000000..89debf6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go @@ -0,0 +1,94 @@ +package testutil + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" +) + +// GenerateTrustCA generates a new certificate authority for testing. +func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "CA Root", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateIntermediate generates an intermediate certificate for testing using +// the parent certificate (likely a CA) and the provided keys. +func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "Intermediate", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateTrustCert generates a new trust certificate for testing. Unlike the +// intermediate certificates, this certificate should be used for signature +// only, not creating certificates. 
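+//
+// A hypothetical usage sketch, where caCert and caPriv come from
+// GenerateTrustCA above and leafPub is the public half of a freshly
+// generated key pair:
+//
+//	leafCert, err := GenerateTrustCert(leafPub, caPriv, caCert)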
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Trust Cert",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
new file mode 100644
index 00000000..24124db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
@@ -0,0 +1,50 @@
+## Libtrust TLS Config Demo
+
+This program generates key pairs and trust files for a TLS client and server.
+
+To generate the keys, run:
+
+```
+$ go run genkeys.go
+```
+
+The generated files are:
+
+```
+$ ls -l client_data/ server_data/
+client_data/:
+total 24
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
+
+server_data/:
+total 24
+-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+```
+
+The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<host>:<port>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
+
+To start the server, run:
+
+```
+$ go run server.go
+```
+
+This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
+
+To make a request using the client, run:
+
+```
+$ go run client.go
+```
+
+This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
+
+The file `gencert.go` can be used to generate PEM-encoded versions of the client key and certificate.
If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). + +``` +curl --cert cert.pem --key key.pem -k https://localhost:8888 +``` diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go new file mode 100644 index 00000000..0a699a0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go @@ -0,0 +1,89 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + privateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + // Load Client Key. + clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) + if err != nil { + log.Fatal(err) + } + + // Generate Client Certificate. + selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) + if err != nil { + log.Fatal(err) + } + + // Load trusted host keys. + hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + // Ensure the host we want to connect to is trusted! + host, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) + if err != nil { + log.Fatalf("%q is not a known and trusted host", host) + } + + // Generate a CA pool with the trusted host's key. + caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) + if err != nil { + log.Fatal(err) + } + + // Create HTTP Client. + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{ + tls.Certificate{ + Certificate: [][]byte{selfSignedClientCert.Raw}, + PrivateKey: clientKey.CryptoPrivateKey(), + Leaf: selfSignedClientCert, + }, + }, + RootCAs: caPool, + }, + }, + } + + var makeRequest = func(url string) { + resp, err := client.Get(url) + if err != nil { + log.Fatal(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Fatal(err) + } + + log.Println(resp.Status) + log.Println(string(body)) + } + + // Make the request to the trusted server! 
+ makeRequest(fmt.Sprintf("https://%s", serverAddress)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go new file mode 100644 index 00000000..c65f3b6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/pem" + "fmt" + "log" + "net" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + clientPrivateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) + if err != nil { + log.Fatal(err) + } + + keyPEMBlock, err := key.PEMBlock() + if err != nil { + log.Fatal(err) + } + + encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) + fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) + + cert, err := libtrust.GenerateSelfSignedClientCert(key) + if err != nil { + log.Fatal(err) + } + + encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) + + trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + hostname, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + + trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) + if err != nil { + log.Fatal(err) + } + + caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) + if err != nil { + log.Fatal(err) + } + + encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) + fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go new file mode 100644 index 00000000..9dc8842a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go @@ -0,0 +1,61 @@ +package main + +import ( + "log" + + "github.com/docker/libtrust" +) + +func main() { + // Generate client key. + clientKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Add a comment for the client key. + clientKey.AddExtendedField("comment", "TLS Demo Client") + + // Save the client key, public and private versions. + err = libtrust.SaveKey("client_data/private_key.pem", clientKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate server key. + serverKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Set the list of addresses to use for the server. + serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) + + // Save the server key, public and private versions. + err = libtrust.SaveKey("server_data/private_key.pem", serverKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Authorized Keys file for server. + err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Known Host Keys file for client. 
+	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
new file mode 100644
index 00000000..d3cb2ea9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"html"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress             = "localhost:8888"
+	privateKeyFilename        = "server_data/private_key.pem"
+	authorizedClientsFilename = "server_data/trusted_clients.pem"
+)
+
+func requestHandler(w http.ResponseWriter, r *http.Request) {
+	clientCert := r.TLS.PeerCertificates[0]
+	keyID := clientCert.Subject.CommonName
+	log.Printf("Request from keyID: %s\n", keyID)
+	fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
+}
+
+func main() {
+	// Load server key.
+	serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate server certificate.
+	selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
+		serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load authorized client keys.
+	authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create CA pool using trusted client keys.
+	caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create TLS config, requiring client certificates.
+	tlsConfig := &tls.Config{
+		Certificates: []tls.Certificate{
+			tls.Certificate{
+				Certificate: [][]byte{selfSignedServerCert.Raw},
+				PrivateKey:  serverKey.CryptoPrivateKey(),
+				Leaf:        selfSignedServerCert,
+			},
+		},
+		ClientAuth: tls.RequireAndVerifyClientCert,
+		ClientCAs:  caPool,
+	}
+
+	// Create HTTP server with simple request handler.
+	server := &http.Server{
+		Addr:    serverAddress,
+		Handler: http.HandlerFunc(requestHandler),
+	}
+
+	// Listen and serve HTTPS using the libtrust TLS config.
+	listener, err := net.Listen("tcp", server.Addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	tlsListener := tls.NewListener(listener, tlsConfig)
+	server.Serve(tlsListener)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
new file mode 100644
index 00000000..72b0fc36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
@@ -0,0 +1,50 @@
+package trustgraph
+
+import "github.com/docker/libtrust"
+
+// TrustGraph represents a graph of authorization mapping
+// public keys to nodes and grants between nodes.
+type TrustGraph interface {
+	// Verify verifies that the given public key is allowed to perform
+	// the given action on the given node according to the trust graph.
+	Verify(libtrust.PublicKey, string, uint16) (bool, error)
+
+	// GetGrants returns an array of all grant chains which are used to
+	// allow the requested permission.
+	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
+}
+
+// Grant represents a transfer of permission from one part of the
+// trust graph to another. This is the only way to delegate
+// permission between two different subtrees in the graph.
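+//
+// For example, a hypothetical grant giving the holder of key "ABCD" full
+// write access (0x0F, per the permission table below) to everything under
+// the /library namespace:
+//
+//	Grant{Subject: "/library", Permission: 0x0F, Grantee: "ABCD"}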
+type Grant struct {
+	// Subject is the namespace being granted.
+	Subject string
+
+	// Permission is a bit map of permissions.
+	Permission uint16
+
+	// Grantee represents the node being granted
+	// a permission scope. The grantee can be either
+	// a namespace item or a key ID; namespace items
+	// always start with a '/'.
+	Grantee string
+
+	// statement represents the statement used to create
+	// this object.
+	statement *Statement
+}
+
+// Permissions
+//   Read node       0x01 (can read node, no sub nodes)
+//   Write node      0x02 (can write to node object, cannot create subnodes)
+//   Read subtree    0x04 (delegates read to each sub node)
+//   Write subtree   0x08 (delegates write to each sub node, including create on the subject)
+//
+// Permission shortcuts
+//   ReadItem = 0x01
+//   WriteItem = 0x03
+//   ReadAccess = 0x07
+//   WriteAccess = 0x0F
+//   Delegate = 0x0F
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
new file mode 100644
index 00000000..247bfa7a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
@@ -0,0 +1,133 @@
+package trustgraph
+
+import (
+	"strings"
+
+	"github.com/docker/libtrust"
+)
+
+type grantNode struct {
+	grants   []*Grant
+	children map[string]*grantNode
+}
+
+type memoryGraph struct {
+	roots map[string]*grantNode
+}
+
+func newGrantNode() *grantNode {
+	return &grantNode{
+		grants:   []*Grant{},
+		children: map[string]*grantNode{},
+	}
+}
+
+// NewMemoryGraph returns a new in-memory trust graph created from
+// a static list of grants. This graph is immutable after creation
+// and any alterations should create a new instance.
+func NewMemoryGraph(grants []*Grant) TrustGraph {
+	roots := map[string]*grantNode{}
+	for _, grant := range grants {
+		parts := strings.Split(grant.Grantee, "/")
+		nodes := roots
+		var node *grantNode
+		var nodeOk bool
+		for _, part := range parts {
+			node, nodeOk = nodes[part]
+			if !nodeOk {
+				node = newGrantNode()
+				nodes[part] = node
+			}
+			if part != "" {
+				node.grants = append(node.grants, grant)
+			}
+			nodes = node.children
+		}
+	}
+	return &memoryGraph{roots}
+}
+
+func (g *memoryGraph) getGrants(name string) []*Grant {
+	nameParts := strings.Split(name, "/")
+	nodes := g.roots
+	var node *grantNode
+	var nodeOk bool
+	for _, part := range nameParts {
+		node, nodeOk = nodes[part]
+		if !nodeOk {
+			return nil
+		}
+		nodes = node.children
+	}
+	return node.grants
+}
+
+func isSubName(name, sub string) bool {
+	if strings.HasPrefix(name, sub) {
+		if len(name) == len(sub) || name[len(sub)] == '/' {
+			return true
+		}
+	}
+	return false
+}
+
+type walkFunc func(*Grant, []*Grant) bool
+
+func foundWalkFunc(*Grant, []*Grant) bool {
+	return true
+}
+
+func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool {
+	if visited == nil {
+		visited = map[*Grant]bool{}
+	}
+	grants := g.getGrants(start)
+	subGrants := make([]*Grant, 0, len(grants))
+	for _, grant := range grants {
+		if visited[grant] {
+			continue
+		}
+		visited[grant] = true
+		if grant.Permission&permission == permission {
+			if isSubName(target, grant.Subject) {
+				if f(grant, chain) {
+					return true
+				}
+			} else {
+				subGrants = append(subGrants, grant)
+			}
+		}
+	}
+	for _, grant := range subGrants {
+		var chainCopy []*Grant
+		if collect {
+			chainCopy = make([]*Grant, len(chain)+1)
+			copy(chainCopy, chain)
+			chainCopy[len(chainCopy)-1] = grant
+		} else {
+			chainCopy = nil
+		}
+
+		if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) {
+			return true
+		}
+	}
+	return false
+}
+
+func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) {
+	return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil
+}
+
+func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) {
+	grants := [][]*Grant{}
+	collect := func(grant *Grant, chain []*Grant) bool {
+		grantChain := make([]*Grant, len(chain)+1)
+		copy(grantChain, chain)
+		grantChain[len(grantChain)-1] = grant
+		grants = append(grants, grantChain)
+		return false
+	}
+	g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true)
+	return grants, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go
new file mode 100644
index 00000000..7a74b553
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go
@@ -0,0 +1,227 @@
+package trustgraph
+
+import (
+	"crypto/x509"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/docker/libtrust"
+)
+
+type jsonGrant struct {
+	Subject    string `json:"subject"`
+	Permission uint16 `json:"permission"`
+	Grantee    string `json:"grantee"`
+}
+
+type jsonRevocation struct {
+	Subject    string `json:"subject"`
+	Revocation uint16 `json:"revocation"`
+	Grantee    string `json:"grantee"`
+}
+
+type jsonStatement struct {
+	Revocations []*jsonRevocation `json:"revocations"`
+	Grants      []*jsonGrant      `json:"grants"`
+	Expiration  time.Time         `json:"expiration"`
+	IssuedAt    time.Time         `json:"issuedAt"`
+}
+
+func (g *jsonGrant) Grant(statement *Statement) *Grant {
+	return &Grant{
+		Subject:    g.Subject,
+		Permission: g.Permission,
+		Grantee:    g.Grantee,
+		statement:  statement,
+	}
+}
+
+// Statement represents a set of grants made from a verifiable
+// authority. A statement has an expiration associated with it
+// set by the authority.
+type Statement struct {
+	jsonStatement
+
+	signature *libtrust.JSONSignature
+}
+
+// IsExpired returns whether the statement has expired.
+func (s *Statement) IsExpired() bool {
+	return s.Expiration.Before(time.Now().Add(-10 * time.Second))
+}
+
+// Bytes returns an indented JSON representation of the statement
+// in a byte array. This value can be written to a file or stream
+// without alteration.
+func (s *Statement) Bytes() ([]byte, error) {
+	return s.signature.PrettySignature("signatures")
+}
+
+// LoadStatement loads and verifies a statement from an input stream.
+func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	js, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return nil, err
+	}
+	payload, err := js.Payload()
+	if err != nil {
+		return nil, err
+	}
+	var statement Statement
+	err = json.Unmarshal(payload, &statement.jsonStatement)
+	if err != nil {
+		return nil, err
+	}
+
+	if authority == nil {
+		_, err = js.Verify()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		_, err = js.VerifyChains(authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+	statement.signature = js
+
+	return &statement, nil
+}
+
+// CreateStatement creates and signs a statement from a stream of grants
+// and revocations in a JSON array.
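+//
+// A minimal usage sketch (grantsFile and revocationsFile are hypothetical
+// io.Readers holding JSON arrays; signingKey and certChain come from the
+// caller):
+//
+//	statement, err := CreateStatement(grantsFile, revocationsFile,
+//		24*time.Hour, signingKey, certChain)
+//	if err != nil {
+//		// handle error
+//	}
+//	data, err := statement.Bytes()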
+func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+	var statement Statement
+	err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
+	if err != nil {
+		return nil, err
+	}
+	err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
+	if err != nil {
+		return nil, err
+	}
+	statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
+	statement.jsonStatement.IssuedAt = time.Now().UTC()
+
+	b, err := json.MarshalIndent(&statement.jsonStatement, "", "   ")
+	if err != nil {
+		return nil, err
+	}
+
+	statement.signature, err = libtrust.NewJSONSignature(b)
+	if err != nil {
+		return nil, err
+	}
+	err = statement.signature.SignWithChain(key, chain)
+	if err != nil {
+		return nil, err
+	}
+
+	return &statement, nil
+}
+
+type statementList []*Statement
+
+func (s statementList) Len() int {
+	return len(s)
+}
+
+func (s statementList) Less(i, j int) bool {
+	return s[i].IssuedAt.Before(s[j].IssuedAt)
+}
+
+func (s statementList) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// CollapseStatements returns a single list of the valid statements as well as the
+// time when the next grant will expire.
+func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
+	sorted := make(statementList, 0, len(statements))
+	for _, statement := range statements {
+		if useExpired || !statement.IsExpired() {
+			sorted = append(sorted, statement)
+		}
+	}
+	sort.Sort(sorted)
+
+	var minExpired time.Time
+	var grantCount int
+	roots := map[string]*grantNode{}
+	for i, statement := range sorted {
+		if statement.Expiration.Before(minExpired) || i == 0 {
+			minExpired = statement.Expiration
+		}
+		for _, grant := range statement.Grants {
+			parts := strings.Split(grant.Grantee, "/")
+			nodes := roots
+			g := grant.Grant(statement)
+			grantCount = grantCount + 1
+
+			for _, part := range parts {
+				node, nodeOk := nodes[part]
+				if !nodeOk {
+					node = newGrantNode()
+					nodes[part] = node
+				}
+				node.grants = append(node.grants, g)
+				nodes = node.children
+			}
+		}
+
+		for _, revocation := range statement.Revocations {
+			parts := strings.Split(revocation.Grantee, "/")
+			nodes := roots
+
+			var node *grantNode
+			var nodeOk bool
+			for _, part := range parts {
+				node, nodeOk = nodes[part]
+				if !nodeOk {
+					break
+				}
+				nodes = node.children
+			}
+			if node != nil {
+				for _, grant := range node.grants {
+					if isSubName(grant.Subject, revocation.Subject) {
+						grant.Permission = grant.Permission &^ revocation.Revocation
+					}
+				}
+			}
+		}
+	}
+
+	retGrants := make([]*Grant, 0, grantCount)
+	for _, rootNodes := range roots {
+		retGrants = append(retGrants, rootNodes.grants...)
+	}
+
+	return retGrants, minExpired, nil
+}
+
+// FilterStatements returns the statements that were used to create the given
+// grants.
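+//
+// For example, to persist only the statements backing a collapsed grant set
+// (a hypothetical sketch using CollapseStatements above):
+//
+//	grants, _, err := CollapseStatements(statements, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	needed, err := FilterStatements(grants)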
+func FilterStatements(grants []*Grant) ([]*Statement, error) { + statements := map[*Statement]bool{} + for _, grant := range grants { + if grant.statement != nil { + statements[grant.statement] = true + } + } + retStatements := make([]*Statement, len(statements)) + var i int + for statement := range statements { + retStatements[i] = statement + i++ + } + return retStatements, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go new file mode 100644 index 00000000..d88176cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
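+//
+// A hypothetical usage sketch (the URL, paths, and server name are
+// illustrative only):
+//
+//	tlsConfig, err := NewIdentityAuthTLSClientConfig(
+//		"tcp://docker.example.com:2376", true, "/home/user/.docker", "docker.example.com")
+//	if err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}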
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+	knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+	u, err := url.Parse(dockerUrl)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse machine url")
+	}
+
+	if u.Scheme == "unix" {
+		return nil, nil
+	}
+
+	addr := u.Host
+	proto := "tcp"
+
+	trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load trust key: %s", err)
+	}
+
+	knownHosts, err := LoadKeySetFile(knownHostsPath)
+	if err != nil {
+		return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+	}
+
+	allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+	if err != nil {
+		return nil, fmt.Errorf("error filtering hosts: %s", err)
+	}
+
+	certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+	if err != nil {
+		return nil, fmt.Errorf("could not create CA pool: %s", err)
+	}
+
+	tlsConfig.ServerName = serverName
+	tlsConfig.RootCAs = certPool
+
+	x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+	if err != nil {
+		return nil, fmt.Errorf("certificate generation error: %s", err)
+	}
+
+	tlsConfig.Certificates = []tls.Certificate{{
+		Certificate: [][]byte{x509Cert.Raw},
+		PrivateKey:  trustKey.CryptoPrivateKey(),
+		Leaf:        x509Cert,
+	}}
+
+	tlsConfig.InsecureSkipVerify = true
+
+	testConn, err := tls.Dial(proto, addr, tlsConfig)
+	if err != nil {
+		return nil, fmt.Errorf("tls handshake error: %s", err)
+	}
+
+	opts := x509.VerifyOptions{
+		Roots:         tlsConfig.RootCAs,
+		CurrentTime:   time.Now(),
+		DNSName:       tlsConfig.ServerName,
+		Intermediates: x509.NewCertPool(),
+	}
+
+	certs := testConn.ConnectionState().PeerCertificates
+	for i, cert := range certs {
+		if i == 0 {
+			continue
+		}
+		opts.Intermediates.AddCert(cert)
+	}
+
+	if _, err := certs[0].Verify(opts); err != nil {
+		if _, ok := err.(x509.UnknownAuthorityError); ok {
+			if trustUnknownHosts {
+				pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+				if err != nil {
+					return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+				}
+
+				pubKey.AddExtendedField("hosts", []string{addr})
+
+				if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+					return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+				}
+			} else {
+				return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+			}
+		}
+	}
+
+	testConn.Close()
+	tlsConfig.InsecureSkipVerify = false
+
+	return tlsConfig, nil
+}
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
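+// For example, the unpadded input "c3VyZQ" is padded to "c3VyZQ==" before
+// being decoded to the bytes of "sure".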
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
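+	// (For example, 65537, i.e. 0x00010001, serializes to the three
+	// octets 0x01 0x00 0x01 once the leading zero byte is dropped.)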
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Scan only the bytes that exist in buf; iterating past len(buf)
+	// could index out of range for a zero exponent.
+	for i = 0; i < len(buf); i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// TODO: return an error for this non-encodable type.
+			}
+		default:
+			// TODO: return an error for this non-encodable type.
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml
new file mode 100644
index 00000000..f983b60c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+sudo: false
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE
new file mode 100644
index 00000000..0e5fb872
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/gorilla/context/README.md new file mode 100644 index 00000000..c60a31b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go new file mode 100644 index 00000000..81cb128b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
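+// The returned map is a copy; mutating it does not affect the values stored
+// in the context.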
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		result := make(map[interface{}]interface{}, len(context))
+		for k, v := range context {
+			result[k] = v
+		}
+		mutex.RUnlock()
+		return result
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	result := make(map[interface{}]interface{}, len(context))
+	for k, v := range context {
+		result[k] = v
+	}
+	mutex.RUnlock()
+	return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+	mutex.Lock()
+	if data[r] != nil {
+		delete(data[r], key)
+	}
+	mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context cleaning was not
+// properly set up, some request data can be kept forever, consuming an
+// increasing amount of memory. If this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
new file mode 100644
index 00000000..73c74003
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 00000000..83ab8f59 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.3 + - 1.4 + - 1.5 + - tip +install: + - go get golang.org/x/tools/cmd/vet +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md
new file mode 100644
index 00000000..b987c9e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/README.md
@@ -0,0 +1,240 @@
+mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+Package `gorilla/mux` implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintain references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+vars := mux.Vars(request)
+category := vars["category"]
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+	"category", "technology",
+	"id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns.
These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+	"category", "technology",
+	"id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	http.ListenAndServe(":8000", r)
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
new file mode 100644
index 00000000..49798cb5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
@@ -0,0 +1,206 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gorilla/mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintain
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+by calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+		Host("www.example.com").
+		Methods("GET").
+		Schemes("http")
+
+Setting the same matching conditions again and again can be repetitive, so we
+have a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes.
+When a subrouter has a path prefix,
+the inner routes use it as the base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name by calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching headers within a route. For example,
+we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of
+`application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+*/
+package mux
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
new file mode 100644
index 00000000..aabe9958
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
@@ -0,0 +1,481 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+
+	"github.com/gorilla/context"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//	var router = mux.NewRouter()
+//
+//	func main() {
+//		http.Handle("/", router)
+//	}
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//	func init() {
+//		http.Handle("/", router)
+//	}
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// If true, do not clear the request context after handling the request.
+	KeepContext bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers).
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved by calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// Clean path to canonical form and redirect.
+	if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+		// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+		// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+		// http://code.google.com/p/go/issues/detail?id=5252
+		url := *req.URL
+		url.Path = p
+		p = url.String()
+
+		w.Header().Set("Location", p)
+		w.WriteHeader(http.StatusMovedPermanently)
+		return
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		setVars(req, match.Vars)
+		setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer context.Clear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
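+//
+// For example (a sketch; PathHandler stands for any handler function):
+//
+//	r := mux.NewRouter()
+//	r.StrictSlash(true)
+//	r.HandleFunc("/path/", PathHandler)
+//	// a request for "/path" is redirected to "/path/"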
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
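+//
+// For example (a sketch; the "id" variable and the zero-padding are
+// illustrative):
+//
+//	r.BuildVarsFunc(func(vars map[string]string) map[string]string {
+//		vars["id"] = "0" + vars["id"]
+//		return vars
+//	}).Path("/articles/{id:[0-9]+}").Name("article")
+//	// r.Get("article").URL("id", "42") now builds "/articles/042"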
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + if val != nil { + context.Set(r, varsKey, val) + } +} + +func setCurrentRoute(r *http.Request, val interface{}) { + if val != nil { + context.Set(r, routeKey, val) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. 
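+// It is used when a route combines host, path and query matchers, to reject
+// templates that declare the same variable name more than once.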
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If the value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given
+// map, with the values matched against the given regexps.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the value was defined as nil we only check that the key exists.
+			// Otherwise we also check that at least one value matches the regexp.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
new file mode 100644
index 00000000..06728dd5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,317 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Strict slash only applies to plain path matching, so disable it for
+	// prefix, host and query matchers.
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		varIdx := i / 2
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[varIdx] = name
+		varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty.
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+	// Done!
+	return &routeRegexp{
+		template:    template,
+		matchHost:   matchHost,
+		matchQuery:  matchQuery,
+		strictSlash: strictSlash,
+		regexp:      reg,
+		reverse:     reverse.String(),
+		varsN:       varsN,
+		varsR:       varsR,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// True for host match, false for path or query string match.
+	matchHost bool
+	// True for query string match, false for path and host match.
+ matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getUrlQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getUrlQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getUrlQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. 
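+	// Each variable was compiled into a named capture group "v0", "v1", ...
+	// (see varGroupName); the loops below map each group back to the variable
+	// name stored at the same index in varsN.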
+	if v.host != nil {
+		hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
+		if hostVars != nil {
+			subexpNames := v.host.regexp.SubexpNames()
+			varName := 0
+			for i, name := range subexpNames[1:] {
+				if name != "" && name == varGroupName(varName) {
+					m.Vars[v.host.varsN[varName]] = hostVars[i+1]
+					varName++
+				}
+			}
+		}
+	}
+	// Store path variables.
+	if v.path != nil {
+		pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
+		if pathVars != nil {
+			subexpNames := v.path.regexp.SubexpNames()
+			varName := 0
+			for i, name := range subexpNames[1:] {
+				if name != "" && name == varGroupName(varName) {
+					m.Vars[v.path.varsN[varName]] = pathVars[i+1]
+					varName++
+				}
+			}
+			// Check if we should redirect.
+			if v.path.strictSlash {
+				p1 := strings.HasSuffix(req.URL.Path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), 301)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req))
+		if queryVars != nil {
+			subexpNames := q.regexp.SubexpNames()
+			varName := 0
+			for i, name := range subexpNames[1:] {
+				if name != "" && name == varGroupName(varName) {
+					m.Vars[q.varsN[varName]] = queryVars[i+1]
+					varName++
+				}
+			}
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go
new file mode 100644
index 00000000..913432c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/route.go
@@ -0,0 +1,595 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulting from building the route, if any.
+	err error
+
+	buildVarsFunc BuildVarsFunc
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			return false
+		}
+	}
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+	// Set variables.
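+	// (setMatch extracts host, path and query variables into match.Vars and,
+	// for strict-slash paths, may swap in a redirect handler.)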
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns the error resulting from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if len(tpl) == 0 || tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
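+// An empty value in the map matches any request that has the header key set,
+// regardless of its value.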
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//	r := mux.NewRouter()
+//	r.Headers("Content-Type", "application/json",
+//		"X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for
+// each header.
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp adds a matcher for request header values; regular expressions
+// can be used with headers as well.
+// It accepts a sequence of key/value pairs, where each value is treated as a
+// regular expression. For example:
+//
+//	r := mux.NewRouter()
+//	r.HeadersRegexp("Content-Type", "application/(text|json)",
+//		"X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match the
+// corresponding regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Host("www.example.com")
+//	r.Host("{subdomain}.domain.com")
+//	r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
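+// The given methods are upper-cased at registration, so "get" and "GET"
+// register the same matcher.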
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Path("/products/").Handler(ProductsHandler)
+//	r.Path("/products/{key}").Handler(ProductsHandler)
+//	r.Path("/articles/{category}/{id:[0-9]+}").
+//		Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo"), so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
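+// The given schemes are lower-cased at registration, so "HTTPS" and "https"
+// register the same matcher.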
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//	r := mux.NewRouter()
+//	s := r.Host("www.example.com").Subrouter()
+//	s.HandleFunc("/products/", ProductsHandler)
+//	s.HandleFunc("/products/{key}", ProductHandler)
+//	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//		Name("article")
+//
+// ...a URL for it can be built using:
+//
+//	url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//	"/articles/technology/42"
+//
+// This also works for host variables:
+//
+//	r := mux.NewRouter()
+//	r.Host("{subdomain}.domain.com").
+//		Path("/articles/{category}/{id:[0-9]+}").
+//		HandlerFunc(ArticleHandler).
+//		Name("article")
+//
+//	// url.String() will be "http://news.domain.com/articles/technology/42"
+//	url, err := r.Get("article").URL("subdomain", "news",
+//		"category", "technology",
+//		"id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	if r.regexp.host != nil {
+		// Set a default scheme.
+		scheme = "http"
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
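+//
+// For example, reusing the named route from the Route.URL() example above
+// (a sketch; only the host template is used):
+//
+//	// host.String() will be "http://news.domain.com"
+//	host, err := r.Get("article").URLHost("subdomain", "news")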
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE b/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE b/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE new file mode 100644 index 00000000..5c97abce --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE @@ -0,0 +1,17 @@ +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. 
+ +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS new file mode 100644 index 00000000..edbe2006 --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go new file mode 100644 index 00000000..6f8a982f --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupUid +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the groups. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No group entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/group. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group.
If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go new file mode 100644 index 00000000..758b734c --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go new file mode 100644 index 00000000..72179488 --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go new file mode 100644 index 00000000..e6375ea4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go @@ -0,0 +1,418 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration 
files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. 
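// For illustration, a minimal use of GetExecUser (defined below); the passwd
// and group contents here are hypothetical stand-ins for /etc/passwd and
// /etc/group, fed in through strings.NewReader:
//
//	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\n")
//	group := strings.NewReader("adm:x:4:root,adm\n")
//	execUser, err := user.GetExecUser("root:adm", nil, passwd, group)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(execUser.Uid, execUser.Gid, execUser.Home) // 0 4 /root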
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults are used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid" +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( + userArg, groupArg string + name string + ) + + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + return u.Uid == user.Uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // we asked for a user but didn't find them... let's check to see if we wanted a numeric user + user.Uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + + // Check if user is a member. + for _, u := range g.List { + if u == name { + return true + } + } + + return false + }) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + user.Gid = groups[0].Gid + } else { + // we asked for a group but didn't find it...
let's check to see if we wanted a numeric group + user.Gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + // If implicit group format, fill supplementary gids. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + var groups = []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. + if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. +func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + group, err := os.Open(groupPath) + if err == nil { + defer group.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE b/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE new file mode 100644 index 00000000..ca03685b --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/common.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/common.go new file mode 100644 index 00000000..c31df062 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/common.go @@ -0,0 +1,329 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. 
+func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
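// For illustration, following the doc comment above (the path is
// hypothetical): restore the full name after the call, since os.FileInfo
// only carries the base name.
//
//	fi, err := os.Stat("layer/etc/hostname")
//	if err != nil {
//		log.Fatal(err)
//	}
//	hdr, err := tar.FileInfoHeader(fi, "")
//	if err != nil {
//		log.Fatal(err)
//	}
//	hdr.Name = "layer/etc/hostname" // fi.Name() was only "hostname"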
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/reader.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/reader.go new file mode 100644 index 00000000..4168ea2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/reader.go @@ -0,0 +1,943 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. 
+// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader + + RawAccounting bool // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this. + rawBytes *bytes.Buffer // last raw bits +} + +// RawBytes accesses the raw bytes of the archive, apart from the file payload itself. +// This includes the header and padding. +// +// This call resets the current rawbytes buffer +// +// Only when RawAccounting is enabled, otherwise this returns nil +func (tr *Reader) RawBytes() []byte { + if !tr.RawAccounting { + return nil + } + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } + // if we've read them, then flush them. + defer tr.rawBytes.Reset() + return tr.rawBytes.Bytes() +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. +type sparseFileReader struct { + rfr *regFileReader // reads the sparse-encoded file data + sp []sparseEntry // the sparse map for the file + pos int64 // keeps track of file position + tot int64 // total size of the file +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + var hdr *Header + if tr.RawAccounting { + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } else { + tr.rawBytes.Reset() + } + } + if tr.err == nil { + tr.skipUnread() + } + if tr.err != nil { + return hdr, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return hdr, tr.err + } + // Check for PAX/GNU header. 
+ switch hdr.Typeflag { + case TypeXHeader: + // PAX extended header + headers, err := parsePAX(tr) + if err != nil { + return nil, err + } + // We actually read the whole file, + // but this skips alignment padding + tr.skipUnread() + if tr.err != nil { + return nil, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return nil, tr.err + } + mergePAX(hdr, headers) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } + return hdr, nil + case TypeGNULongName: + // We have a GNU long name header. Its contents are the real file name. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + var buf []byte + if tr.RawAccounting { + if _, err = tr.rawBytes.Write(realname); err != nil { + return nil, err + } + buf = make([]byte, tr.rawBytes.Len()) + copy(buf[:], tr.RawBytes()) + } + hdr, err := tr.Next() + // since the above call to Next() resets the buffer, we need to throw the bytes over + if tr.RawAccounting { + buf = append(buf, tr.RawBytes()...) + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + hdr.Name = cString(realname) + return hdr, err + case TypeGNULongLink: + // We have a GNU long link header. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + var buf []byte + if tr.RawAccounting { + if _, err = tr.rawBytes.Write(realname); err != nil { + return nil, err + } + buf = make([]byte, tr.rawBytes.Len()) + copy(buf[:], tr.RawBytes()) + } + hdr, err := tr.Next() + // since the above call to Next() resets the buffer, we need to throw the bytes over + if tr.RawAccounting { + buf = append(buf, tr.RawBytes()...) + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + hdr.Linkname = cString(realname) + return hdr, err + } + return hdr, tr.err +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. 
+ return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well-known PAX records into the corresponding fields of +// the header struct. In general a PAX record overwrites the field of the +// same name, since it can carry higher precision or longer values. This is +// especially useful for the name and linkname fields. +func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned. +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(buf) > 0 { + // or the header was empty to start with. + var sp int + // The size field ends at the first space. + sp = bytes.IndexByte(buf, ' ') + if sp == -1 { + return nil, ErrHeader + } + // Parse the first token as a decimal integer. + n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) + if err != nil || n < 5 || int64(len(buf)) < n { + return nil, ErrHeader + } + // Extract the record between the length field and the record's end: + // +1 on the beginning to eat the ' ', -1 on the end to skip the newline. + var record []byte + record, buf = buf[sp+1:n-1], buf[n:] + // The first equals is guaranteed to mark the end of the key. + // Everything else is value. + eq := bytes.IndexByte(record, '=') + if eq == -1 { + return nil, ErrHeader + } + key, value := record[:eq], record[eq+1:] + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.Write(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. + headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// cString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func cString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +func (tr *Reader) octal(b []byte) int64 { + // Check for binary format first. + if len(b) > 0 && b[0]&0x80 != 0 { + var x int64 + for i, c := range b { + if i == 0 { + c &= 0x7f // ignore the binary-format indicator bit in the first byte + } + x = x<<8 | int64(c) + } + return x + } + + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, err := strconv.ParseUint(cString(b), 8, 64) + if err != nil { + tr.err = err + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
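// For reference, each record handled by parsePAX above has the form
// "%d %s=%s\n", where the leading decimal counts the whole record including
// itself. An illustrative record:
//
//	30 mtime=1350244992.023960108\n
//
// is 30 bytes long: 3 for "30 ", 6 for "mtime=", 20 for the value, and 1 for
// the newline, so buf[n:] lands exactly at the start of the next record.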
+func (tr *Reader) skipUnread() { + nr := tr.numBytes() + tr.pad // number of bytes to skip + tr.curr, tr.pad = nil, 0 + if tr.RawAccounting { + _, tr.err = io.CopyN(tr.rawBytes, tr.r, nr) + return + } + if sr, ok := tr.r.(io.Seeker); ok { + if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { + return + } + } + _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + given := tr.octal(header[148:156]) + unsigned, signed := checksum(header) + return given == unsigned || given == signed +} + +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + hdr := new(Header) + s := slicer(header) + + hdr.Name = cString(s.next(100)) + hdr.Mode = tr.octal(s.next(8)) + hdr.Uid = int(tr.octal(s.next(8))) + hdr.Gid = int(tr.octal(s.next(8))) + hdr.Size = tr.octal(s.next(12)) + if hdr.Size < 0 { + tr.err = ErrHeader + return nil + } + hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = cString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. + var format string + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case magic == "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = cString(s.next(32)) + hdr.Gname = cString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = tr.octal(devmajor) + hdr.Devminor = tr.octal(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = cString(s.next(155)) + case "star": + prefix = cString(s.next(131)) + hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) + hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if tr.err != nil { + tr.err = ErrHeader + return nil + } + + // Maximum value of hdr.Size is 64 GB (12 octal digits), + // so there's no risk of int64 overflowing. 
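// (Illustrative arithmetic: 12 octal digits give at most 8^12 = 2^36 bytes,
// i.e. 64 GiB, far below the int64 limit. The padding computed below relies
// on the power-of-two block size: -nb & (blockSize-1) equals
// (blockSize - nb%blockSize) % blockSize, so e.g. a 600-byte entry is
// followed by 424 bytes of padding up to the 1024-byte boundary.)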
+ nb := int64(hdr.Size) + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + + // Set the current file reader. + tr.curr = &regFileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = tr.octal(header[483:495]) + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + // Current file is a GNU sparse file. Update the current file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } + + return hdr +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// A sparse entry indicates the offset and size in a sparse file of a +// block of data. +type sparseEntry struct { + offset int64 + numBytes int64 +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. +func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. + sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil { + return nil + } + } + + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0. +// The sparse map is stored just before the file data and padded out to the nearest block boundary.
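// For illustration, a sparse-1.0 map as it appears on the wire is a run of
// newline-separated decimals, NUL-padded to the next 512-byte boundary: the
// entry count first, then offset/size pairs. A hypothetical two-fragment map:
//
//	2\n1024\n512\n8192\n512\n
//
// describes 512 bytes of data at offset 1024 and 512 bytes at offset 8192.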
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + buf := make([]byte, 2*blockSize) + sparseHeader := buf[:blockSize] + + // readDecimal is a helper function to read a decimal integer from the sparse map + // while making sure to read from the file in blocks of size blockSize + readDecimal := func() (int64, error) { + // Look for newline + nl := bytes.IndexByte(sparseHeader, '\n') + if nl == -1 { + if len(sparseHeader) >= blockSize { + // This is an error + return 0, ErrHeader + } + oldLen := len(sparseHeader) + newLen := oldLen + blockSize + if cap(sparseHeader) < newLen { + // There's more header, but we need to make room for the next block + copy(buf, sparseHeader) + sparseHeader = buf[:newLen] + } else { + // There's more header, and we can just reslice + sparseHeader = sparseHeader[:newLen] + } + + // Now that sparseHeader is large enough, read next block + if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err := tr.rawBytes.Write(sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + } + + // Look for a newline in the new data + nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n') + if nl == -1 { + // This is an error + return 0, ErrHeader + } + nl += oldLen // We want the position from the beginning + } + // Now that we've found a newline, read a number + n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0) + if err != nil { + return 0, ErrHeader + } + + // Update sparseHeader to consume this number + sparseHeader = sparseHeader[nl+1:] + return n, nil + } + + // Read the first block + if _, err := io.ReadFull(r, sparseHeader); err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err := tr.rawBytes.Write(sparseHeader); err != nil { + return nil, err + } + } + + // The first line contains the number of entries + numEntries, err := readDecimal() + if err != nil { + return nil, err + } + + // Read all the entries + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + // Read the offset + offset, err := readDecimal() + if err != nil { + return nil, err + } + // Read numBytes + numBytes, err := readDecimal() + if err != nil { + return nil, err + } + + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1. +// The sparse map is stored in the PAX headers. 
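// For illustration, the same two fragments as in the 1.0 example above would
// arrive in PAX records such as (hypothetical):
//
//	GNU.sparse.numblocks=2
//	GNU.sparse.map=1024,512,8192,512
//
// where offsets alternate with byte counts in the comma-separated map.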
+func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) { + // Get number of entries + numEntriesStr, ok := headers[paxGNUSparseNumBlocks] + if !ok { + return nil, ErrHeader + } + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) + if err != nil { + return nil, ErrHeader + } + + sparseMap := strings.Split(headers[paxGNUSparseMap], ",") + + // There should be two numbers in sparseMap for each entry + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.curr == nil { + return 0, io.EOF + } + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// readHole reads a sparse file hole ending at offset toOffset +func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int { + n64 := toOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + if len(sfr.sp) == 0 { + // No more data fragments to read from. + if sfr.pos < sfr.tot { + // We're in the last hole + n = sfr.readHole(b, sfr.tot) + return + } + // Otherwise, we're at the end of the file + return 0, io.EOF + } + if sfr.tot < sfr.sp[0].offset { + return 0, io.ErrUnexpectedEOF + } + if sfr.pos < sfr.sp[0].offset { + // We're in a hole + n = sfr.readHole(b, sfr.sp[0].offset) + return + } + + // We're not in a hole, so we'll read from the next data fragment + posInFragment := sfr.pos - sfr.sp[0].offset + bytesLeft := sfr.sp[0].numBytes - posInFragment + if int64(len(b)) > bytesLeft { + b = b[0:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + + if int64(n) == bytesLeft { + // We're done with this fragment + sfr.sp = sfr.sp[1:] + } + + if err == io.EOF && sfr.pos < sfr.tot { + // We reached the end of the last fragment's data, but there's a final hole + err = nil + } + return +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. 
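// To make the expansion concrete: with sp = [{offset: 1024, numBytes: 512}]
// and tot = 2048, Read yields 1024 zero bytes for the leading hole, then the
// 512 archived bytes, then 512 more zeros for the trailing hole; only the
// 512 data bytes are actually stored in the archive.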
+func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.nb +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go new file mode 100644 index 00000000..cf9cc79c --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go new file mode 100644 index 00000000..6f17dbe3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go new file mode 100644 index 00000000..cb843db4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/writer.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/writer.go new file mode 100644 index 00000000..9dbc01a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/writer.go @@ -0,0 +1,396 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) 
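// For illustration, a minimal round trip through this Writer and the Reader
// above, assuming the Write and Close methods that complete this file further
// down; error handling elided for brevity:
//
//	var buf bytes.Buffer
//	tw := tar.NewWriter(&buf)
//	body := []byte("hello\n")
//	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body)), ModTime: time.Now()})
//	tw.Write(body)
//	tw.Close()
//
//	tr := tar.NewReader(&buf)
//	hdr, _ := tr.Next()
//	data, _ := ioutil.ReadAll(tr)
//	fmt.Println(hdr.Name, string(data)) // hello.txt hello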
+ +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errNameTooLong = errors.New("archive/tar: name too long") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header + hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header + paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +// Write s into b, terminating it with a NUL if there is room. +// If the value is too long for the field and allowPax is true, add a pax header record instead. +func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + if len(s) > len(b) { + if tw.err == nil { + tw.err = ErrFieldTooLong + } + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// Encode x as an octal ASCII string and write it into b with leading zeros. +func (tw *Writer) octal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // leading zeros, but leave room for a NUL. + for len(s)+1 < len(b) { + s = "0" + s + } + tw.cString(b, s, false, paxNone, nil) +} + +// Write x into b, either as octal or as binary (GNU tar/star extension). +// If the value is too long for the field, and PAX is both preferred and allowed for this field, add a pax header record instead. +func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + tw.octal(b, x) + return + } + + // If it is too long for octal, and pax is preferred, use a pax header + if allowPax && tw.preferPax { + tw.octal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + // Too big: use binary (big-endian).
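// (Illustrative: in a 12-byte field the smallest value forced into binary is
// 8^11 = 8589934592, which needs 12 octal digits and so no longer leaves room
// for a terminator; it is stored big-endian as
//
//	80 00 00 00 00 00 00 02 00 00 00 00
//
// with the 0x80 flag on the first byte marking base-256 encoding.)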
+ tw.usedBinary = true + for i := len(b) - 1; x > 0 && i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // highest bit indicates binary format +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// writeHeader writes hdr and prepares to accept the file's contents. +// It calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// writePAXHeader calls this method internally with allowPax set to false to +// suppress writing a pax header for the extended header entry itself. +func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // overly long fields or non-ASCII characters + + var header []byte + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. + header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) + s := slicer(header) + + // keep a reference to the filename so we can overwrite it later if we detect that we can use ustar long names instead of pax + pathHeaderBytes := s.next(fileNameSize) + + tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders) + + // Handle out of range ModTime carefully. + var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + tw.octal(s.next(8), hdr.Mode) // 100:108 + tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116 + tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124 + tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136 + tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297 + tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329 + tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337 + tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345 + + // keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar long names instead of pax + prefixHeaderBytes := s.next(155) + tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + suffix := hdr.Name + prefix := "" + if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) { + var err error + prefix, suffix, err = tw.splitUSTARLongName(hdr.Name) + if err == nil { + // ok, we can use a ustar long name instead of pax; now correct the fields + + // Remove the path field from the pax header. This will suppress the pax header. + delete(paxHeaders, paxPath) + + // update the path fields + tw.cString(pathHeaderBytes, suffix, false, paxNone, nil) + tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil) + + // Use the ustar magic if we used ustar long names. + if len(prefix) > 0 && !tw.usedBinary { + copy(header[257:265], []byte("ustar\x00")) + } + } + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + tw.octal(header[148:155], chksum) + header[155] = ' ' + + if tw.err != nil { + // problem with header; probably integer too big for a field. + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +// splitUSTARLongName splits a USTAR long name into a prefix and a suffix. +// name must be < 256 characters. errNameTooLong is returned +// if name can't be split. The splitting heuristic +// is compatible with GNU tar. +func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) { + length := len(name) + if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + i := strings.LastIndex(name[:length], "/") + // nlen contains the resulting length in the name field. + // plen contains the resulting length in the prefix field. + nlen := len(name) - i - 1 + plen := i + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + err = errNameTooLong + return + } + prefix, suffix = name[:i], name[i+1:] + return +} + +// writePAXHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid.
+ pid := os.Getpid() + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, + fmt.Sprintf("PaxHeaders.%d", pid), file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + for k, v := range paxHeaders { + fmt.Fprint(&buf, paxHeader(k+"="+v)) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// paxHeader formats a single pax record, prefixing it with the appropriate length +func paxHeader(msg string) string { + const padding = 2 // Extra padding for space and newline + size := len(msg) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s\n", size, msg) + if len(record) != size { + // Final adjustment if adding size increased + // the number of digits in size + size = len(record) + record = fmt.Sprintf("%d %s\n", size, msg) + } + return record +} + +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. +func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/README.md b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/README.md new file mode 100644 index 00000000..2a3a5b56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/README.md @@ -0,0 +1,44 @@ +asm +=== + +This library is for assembly and disassembly of tar archives, facilitated by +`github.com/vbatts/tar-split/tar/storage`. + + +Concerns +-------- + +For completely safe assembly/disassembly, there will need to be a Content +Addressable Storage (CAS) directory that maps to a checksum in the +`storage.Entry` of `storage.FileType`. + +This is due to the fact that tar archives _can_ allow multiple records for the +same path, but the last one effectively wins, even if the prior records had a +different payload. + +Without such a CAS, when assembling an archive from relative paths, if the +archive has multiple entries for the same path, then all payloads read in from +a relative path would be identical. + + +Thoughts +-------- + +Have a look-aside directory or storage. This way, when a clobbering record is +encountered in the tar stream, the payload of the prior/existing file is +stored to the CAS. The clobbering record's file payload can then be extracted, +but we'll have preserved the payload needed to reassemble a precise tar +archive. + +clobbered/path/to/file.[0-N] + +*alternatively* + +We could just _not_ support tar streams that have clobbering file paths.
+Appending records to the archive is not incredibly common, and doesn't happen +by default for most implementations. Not supporting them wouldn't be a +security concern either: if clobbering did occur, we would reassemble an +archive that fails signature/checksum validation, so it shouldn't be trusted +anyway. + +This would also allow us to defer support for appended files as a FUTURE FEATURE. + diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/assemble.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/assemble.go new file mode 100644 index 00000000..d624450a --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/assemble.go @@ -0,0 +1,130 @@ +package asm + +import ( + "bytes" + "fmt" + "hash" + "hash/crc64" + "io" + "sync" + + "github.com/vbatts/tar-split/tar/storage" +) + +// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive +// stream. +// +// It takes a storage.FileGetter, for mapping the file payloads that are to be read in, +// and a storage.Unpacker, which has access to the raw bytes and file order +// metadata. With the combination of these two items, a precise assembled tar +// archive is possible. +func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser { + // Since fg and up are interfaces, callers can pass nil; guard against a nil pointer. + if fg == nil || up == nil { + return nil + } + pr, pw := io.Pipe() + go func() { + err := WriteOutputTarStream(fg, up, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + return pr +} + +// WriteOutputTarStream writes an assembled tar archive to a writer. +func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error { + // Since fg and up are interfaces, callers can pass nil; guard against a nil pointer. + if fg == nil || up == nil { + return nil + } + var copyBuffer []byte + var crcHash hash.Hash + var crcSum []byte + var multiWriter io.Writer + for { + entry, err := up.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + switch entry.Type { + case storage.SegmentType: + if _, err := w.Write(entry.Payload); err != nil { + return err + } + case storage.FileType: + if entry.Size == 0 { + continue + } + fh, err := fg.Get(entry.GetName()) + if err != nil { + return err + } + if crcHash == nil { + crcHash = crc64.New(storage.CRCTable) + crcSum = make([]byte, 8) + multiWriter = io.MultiWriter(w, crcHash) + copyBuffer = byteBufferPool.Get().([]byte) + defer byteBufferPool.Put(copyBuffer) + } else { + crcHash.Reset() + } + + if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil { + fh.Close() + return err + } + + if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) { + // I would rather this be a comparable ErrInvalidChecksum or such, + // but since it's coming through the PipeReader, the context of + // _which_ file would be lost...
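+				// ... so the file name is embedded in the error text below instead.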
+ fh.Close() + return fmt.Errorf("file integrity checksum failed for %q", entry.GetName()) + } + fh.Close() + } + } +} + +var byteBufferPool = &sync.Pool{ + New: func() interface{} { + return make([]byte, 32*1024) + }, +} + +// copyWithBuffer is taken from the stdlib io.Copy implementation +// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367 +func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/disassemble.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/disassemble.go new file mode 100644 index 00000000..54ef23ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/disassemble.go @@ -0,0 +1,141 @@ +package asm + +import ( + "io" + "io/ioutil" + + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/storage" +) + +// NewInputTarStream wraps the Reader stream of a tar archive and provides a +// Reader stream of the same. +// +// In the middle it will pack the segments and file metadata to storage.Packer +// `p`. +// +// The storage.FilePutter is where the payloads of files in the stream are +// stashed. If this stashing is not needed, you can provide a nil +// storage.FilePutter. Since checksumming is still needed, a default +// NewDiscardFilePutter will be used internally. +func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) { + // What to do here... folks will want their own access to the Reader that is + // their tar archive stream, but we'll need that same stream to use our + // forked 'archive/tar'. + // Perhaps do an io.TeeReader that hands back an io.Reader for them to read + // from, and we'll MITM the stream to store metadata. + // We'll need a storage.FilePutter too ... + + // Another concern is whether to do any storage.FilePutter operations, such that we + // don't extract any amount of the archive. But then again, we're not making + // files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter. + // Perhaps we have a DiscardFilePutter that is a bit bucket. + + // we'll return the pipe reader, since TeeReader does not buffer and will + // only read what the outputRdr reads. Since tar archives have padding on + // the end, we want to be the one reading the padding, even if the user's + // `archive/tar` doesn't care. + pR, pW := io.Pipe() + outputRdr := io.TeeReader(r, pW) + + // we need a putter that will generate the crc64 sums of file payloads + if fp == nil { + fp = storage.NewDiscardFilePutter() + } + + go func() { + tr := tar.NewReader(outputRdr) + tr.RawAccounting = true + for { + hdr, err := tr.Next() + if err != nil { + if err != io.EOF { + pW.CloseWithError(err) + return + } + // even when an EOF is reached, there are often 1024 null bytes on + // the end of an archive. Collect them too. + if b := tr.RawBytes(); len(b) > 0 { + _, err := p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: b, + }) + if err != nil { + pW.CloseWithError(err) + return + } + } + break // not return. We need the end of the reader. + } + if hdr == nil { + break // not return. We need the end of the reader.
+ } + + if b := tr.RawBytes(); len(b) > 0 { + _, err := p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: b, + }) + if err != nil { + pW.CloseWithError(err) + return + } + } + + var csum []byte + if hdr.Size > 0 { + var err error + _, csum, err = fp.Put(hdr.Name, tr) + if err != nil { + pW.CloseWithError(err) + return + } + } + + entry := storage.Entry{ + Type: storage.FileType, + Size: hdr.Size, + Payload: csum, + } + // For proper marshalling of non-utf8 characters + entry.SetName(hdr.Name) + + // File entries are added regardless of size + _, err = p.AddEntry(entry) + if err != nil { + pW.CloseWithError(err) + return + } + + if b := tr.RawBytes(); len(b) > 0 { + _, err = p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: b, + }) + if err != nil { + pW.CloseWithError(err) + return + } + } + } + + // It is allowable, and not uncommon, that there is further padding on the + // end of an archive, apart from the expected 1024 null bytes. + remainder, err := ioutil.ReadAll(outputRdr) + if err != nil && err != io.EOF { + pW.CloseWithError(err) + return + } + _, err = p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: remainder, + }) + if err != nil { + pW.CloseWithError(err) + return + } + pW.Close() + }() + + return pR, nil +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/doc.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/doc.go new file mode 100644 index 00000000..4367b902 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/doc.go @@ -0,0 +1,9 @@ +/* +Package asm provides the API for streaming assembly and disassembly of tar +archives. + +It uses `github.com/vbatts/tar-split/tar/storage` for packing/unpacking the +metadata of a stream, as well as an implementation for getting/putting the +file entries' payloads. +*/ +package asm diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/doc.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/doc.go new file mode 100644 index 00000000..83f7089f --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/doc.go @@ -0,0 +1,12 @@ +/* +Package storage is for metadata of a tar archive. + +It packs and unpacks the Entries of the stream. Each Entry is either a +segment of raw bytes (for the raw headers and various padding) or a marker +for a file payload. + +The raw bytes are stored precisely in the packed (marshalled) Entry, whereas +the file payload marker includes the name of the file, its size, and a crc64 +checksum (for basic file integrity). +*/ +package storage diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/entry.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/entry.go new file mode 100644 index 00000000..c91e7ea1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/entry.go @@ -0,0 +1,78 @@ +package storage + +import "unicode/utf8" + +// Entries is for sorting by Position +type Entries []Entry + +func (e Entries) Len() int { return len(e) } +func (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position } + +// Type of Entry
type Type int + +const ( + // FileType represents a file payload from the tar stream. + // + // This will be used to map to relative paths on disk. Only entries with + // Size > 0 will get read into a resulting output stream (due to hardlinks).
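+	//
+	// As an illustration (editor's sketch; the values shown are hypothetical),
+	// a FileType entry in the packed JSON metadata looks roughly like:
+	//
+	//	{"type":1,"name":"./hello.txt","size":5,"payload":"MDEyMzQ1Njc=","position":1}
+	//
+	// where payload holds the base64 form of the file's crc64 checksum.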
+ FileType Type = 1 + iota + // SegmentType represents a raw bytes segment from the archive stream. These raw + // byte segments consist of the raw headers and various padding. + // + // Its payload is base64 encoded when marshalled. + SegmentType +) + +// Entry is the structure for packing and unpacking the information read from +// the Tar archive. +// +// A FileType entry's Payload checksum uses `hash/crc64` for basic file integrity, +// _not_ for cryptography. +// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000 +// collisions in a sample of 18.2 million, while CRC64 had none. +type Entry struct { + Type Type `json:"type"` + Name string `json:"name,omitempty"` + NameRaw []byte `json:"name_raw,omitempty"` + Size int64 `json:"size,omitempty"` + Payload []byte `json:"payload"` // SegmentType stores the raw payload here; FileType stores the crc64 checksum here + Position int `json:"position"` +} + +// SetName checks whether name is a valid UTF-8 string, and sets the appropriate +// field. See https://github.com/vbatts/tar-split/issues/17 +func (e *Entry) SetName(name string) { + if utf8.ValidString(name) { + e.Name = name + } else { + e.NameRaw = []byte(name) + } +} + +// SetNameBytes checks whether name is a valid UTF-8 string, and sets the +// appropriate field +func (e *Entry) SetNameBytes(name []byte) { + if utf8.Valid(name) { + e.Name = string(name) + } else { + e.NameRaw = name + } +} + +// GetName returns the string for the entry's name, regardless of which field +// it is stored in +func (e *Entry) GetName() string { + if len(e.NameRaw) > 0 { + return string(e.NameRaw) + } + return e.Name +} + +// GetNameBytes returns the bytes for the entry's name, regardless of which +// field it is stored in +func (e *Entry) GetNameBytes() []byte { + if len(e.NameRaw) > 0 { + return e.NameRaw + } + return []byte(e.Name) +} diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/getter.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/getter.go new file mode 100644 index 00000000..ae11f8ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/getter.go @@ -0,0 +1,104 @@ +package storage + +import ( + "bytes" + "errors" + "hash/crc64" + "io" + "os" + "path/filepath" +) + +// FileGetter is the interface for getting a stream of a file payload, +// addressed by name/filename. Presumably, the names will be scoped to relative +// file paths. +type FileGetter interface { + // Get returns a stream for the provided file path + Get(filename string) (output io.ReadCloser, err error) +} + +// FilePutter is the interface for storing a stream of a file payload, +// addressed by name/filename. +type FilePutter interface { + // Put returns the size of the stream received, and the crc64 checksum for + // the provided stream + Put(filename string, input io.Reader) (size int64, checksum []byte, err error) +} + +// FileGetPutter is the interface that groups both Getting and Putting file +// payloads. +type FileGetPutter interface { + FileGetter + FilePutter +} + +// NewPathFileGetter returns a FileGetter for files relative to the path +// relpath.
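+//
+// For example (editor's sketch; the paths are hypothetical), a getter rooted
+// at an unpacked tree can feed reassembly:
+//
+//	fg := NewPathFileGetter("/var/lib/example/rootfs")
+//	rc, err := fg.Get("etc/hostname")
+//	if err == nil {
+//		defer rc.Close()
+//		// rc now streams the payload recorded for "etc/hostname"
+//	}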
+func NewPathFileGetter(relpath string) FileGetter { + return &pathFileGetter{root: relpath} +} + +type pathFileGetter struct { + root string +} + +func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) { + return os.Open(filepath.Join(pfg.root, filename)) +} + +type bufferFileGetPutter struct { + files map[string][]byte +} + +func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) { + if _, ok := bfgp.files[name]; !ok { + return nil, errors.New("no such file") + } + b := bytes.NewBuffer(bfgp.files[name]) + return &readCloserWrapper{b}, nil +} + +func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) { + crc := crc64.New(CRCTable) + buf := bytes.NewBuffer(nil) + cw := io.MultiWriter(crc, buf) + i, err := io.Copy(cw, r) + if err != nil { + return 0, nil, err + } + bfgp.files[name] = buf.Bytes() + return i, crc.Sum(nil), nil +} + +type readCloserWrapper struct { + io.Reader +} + +func (w *readCloserWrapper) Close() error { return nil } + +// NewBufferFileGetPutter is a simple in-memory FileGetPutter. +// +// It is memory intensive, so it is probably best for testing or lightweight +// cases. +func NewBufferFileGetPutter() FileGetPutter { + return &bufferFileGetPutter{ + files: map[string][]byte{}, + } +} + +// NewDiscardFilePutter is a bit bucket FilePutter +func NewDiscardFilePutter() FilePutter { + return &bitBucketFilePutter{} +} + +type bitBucketFilePutter struct{} + +func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) { + c := crc64.New(CRCTable) + i, err := io.Copy(c, r) + return i, c.Sum(nil), err +} + +// CRCTable is the default table used for crc64 sum calculations +var CRCTable = crc64.MakeTable(crc64.ISO) diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/packer.go b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/packer.go new file mode 100644 index 00000000..aba69481 --- /dev/null +++ b/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/packer.go @@ -0,0 +1,127 @@ +package storage + +import ( + "encoding/json" + "errors" + "io" + "path/filepath" + "unicode/utf8" +) + +// ErrDuplicatePath occurs when a tar archive has more than one entry for the +// same file path +var ErrDuplicatePath = errors.New("duplicates of file paths not supported") + +// Packer describes the methods to pack Entries to a storage destination +type Packer interface { + // AddEntry packs the Entry and returns its position + AddEntry(e Entry) (int, error) +} + +// Unpacker describes the methods to read Entries from a source +type Unpacker interface { + // Next returns the next Entry being unpacked, or an error, until io.EOF + Next() (*Entry, error) +} + +/* TODO(vbatts) figure out a good model for this +type PackUnpacker interface { + Packer + Unpacker +} +*/ + +type jsonUnpacker struct { + seen seenNames + dec *json.Decoder +} + +func (jup *jsonUnpacker) Next() (*Entry, error) { + var e Entry + err := jup.dec.Decode(&e) + if err != nil { + return nil, err + } + + // check for dup name + if e.Type == FileType { + cName := filepath.Clean(e.GetName()) + if _, ok := jup.seen[cName]; ok { + return nil, ErrDuplicatePath + } + jup.seen[cName] = struct{}{} + } + + return &e, err +} + +// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and +// FileType) as json documents. +// +// Each Entry read is expected to be delimited by a new line.
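+//
+// Illustrative round trip (editor's sketch; tarStream and fileGetter are
+// hypothetical values), pairing this package with tar/asm:
+//
+//	var metadata bytes.Buffer
+//	r, _ := asm.NewInputTarStream(tarStream, NewJSONPacker(&metadata), nil)
+//	io.Copy(ioutil.Discard, r) // drain the stream; metadata now holds the entries
+//	out := asm.NewOutputTarStream(fileGetter, NewJSONUnpacker(&metadata))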
+func NewJSONUnpacker(r io.Reader) Unpacker { + return &jsonUnpacker{ + dec: json.NewDecoder(r), + seen: seenNames{}, + } +} + +type jsonPacker struct { + w io.Writer + e *json.Encoder + pos int + seen seenNames +} + +type seenNames map[string]struct{} + +func (jp *jsonPacker) AddEntry(e Entry) (int, error) { + // if Name is not valid utf8, switch it to raw first. + if e.Name != "" { + if !utf8.ValidString(e.Name) { + e.NameRaw = []byte(e.Name) + e.Name = "" + } + } + + // check early for dup name + if e.Type == FileType { + cName := filepath.Clean(e.GetName()) + if _, ok := jp.seen[cName]; ok { + return -1, ErrDuplicatePath + } + jp.seen[cName] = struct{}{} + } + + e.Position = jp.pos + err := jp.e.Encode(e) + if err != nil { + return -1, err + } + + // made it this far, increment now + jp.pos++ + return e.Position, nil +} + +// NewJSONPacker provides a Packer that writes each Entry (SegmentType and +// FileType) as a json document. +// +// The Entries are delimited by new lines. +func NewJSONPacker(w io.Writer) Packer { + return &jsonPacker{ + w: w, + e: json.NewEncoder(w), + seen: seenNames{}, + } +} + +/* +TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgpack +(https://github.com/ugorji/go) + + +Even so, since our jsonUnpacker and jsonPacker just take an +io.Reader/io.Writer, we can get away with passing them a +gzip.Reader/gzip.Writer +*/ diff --git a/Godeps/_workspace/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/net/PATENTS b/Godeps/_workspace/src/golang.org/x/net/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project.
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go new file mode 100644 index 00000000..11bd8d34 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/context.go @@ -0,0 +1,447 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between them must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set.
Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key return the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stored using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. 
+func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than deadline. If the parent's deadline is already earlier than +// deadline, WithDeadline(parent, deadline) is semantically equivalent to parent. +// The returned context's Done channel is closed when the deadline expires, when +// the returned cancel function is called, or when the parent context's Done +// channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu.
+ + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 00000000..e3170e33 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 00000000..56bcbadb --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
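+
+// This file is the pre-go1.5 fallback: cancellation is only possible when the
+// client's Transport implements CancelRequest (net/http.Transport does);
+// otherwise the returned cancel function is a no-op.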
+ +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000..19106759 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,123 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + cancel() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. 
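+// It is what lets the watcher goroutine in Do stop waiting (or cancel the
+// in-flight request) once the caller is finished with the response body.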
+type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +}