diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index 40767556..00000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,240 +0,0 @@ -{ - "ImportPath": "github.com/runcom/skopeo", - "GoVersion": "go1.5.3", - "Packages": [ - "." - ], - "Deps": [ - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.8.7-55-gf7f79f7", - "Rev": "f7f79f729e0fbe2fcc061db48a9ba0263f588252" - }, - { - "ImportPath": "github.com/codegangsta/cli", - "Comment": "1.2.0-187-gc31a797", - "Rev": "c31a7975863e7810c92e2e288a9ab074f9a88f29" - }, - { - "ImportPath": "github.com/docker/distribution", - "Comment": "v2.2.0-207-gcaa2001", - "Rev": "caa2001e1fa738e14be6ba5f89cd9d41aebcd204" - }, - { - "ImportPath": "github.com/docker/docker/api", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/cliconfig", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/daemon/graphdriver", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/distribution", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/dockerversion", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/image", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/layer", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/opts", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/archive", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/chrootarchive", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/fileutils", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/homedir", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/httputils", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/idtools", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/mflag", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/parsers/kernel", - "Comment": 
"v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/pools", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/progress", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/promise", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/random", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/reexec", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/stringid", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/tarsum", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/useragent", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/pkg/version", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/reference", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/docker/registry", - "Comment": "v1.4.1-9441-gc3a9ece", - "Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a" - }, - { - "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.2.2-13-ge98ada9", - "Rev": "e98ada9b4fa351554aa7ee7cf857a2d6d26b1453" - }, - { - "ImportPath": "github.com/docker/go-connections/nat", - "Comment": "v0.1.2-2-g082e382", - "Rev": "082e382836465660fe3fd21de06624a205f986eb" - }, - { - "ImportPath": "github.com/docker/go-connections/tlsconfig", - "Comment": "v0.1.2-2-g082e382", - "Rev": "082e382836465660fe3fd21de06624a205f986eb" - }, - { - "ImportPath": "github.com/docker/go-units", - "Comment": "v0.1.0-21-g0bbddae", - "Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "1c83b3eabd45b6d76072b66b746c20815fb2872d" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "26a6070f849969ba72b72256e9f14cf519751690" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v0.0.4-226-gbc46574", - "Rev": "bc465742ac61aad50dec8e9769a75f01d8d8e502" - }, - { - "ImportPath": "github.com/vbatts/tar-split/archive/tar", - "Comment": "v0.9.11-1-gd50e5c9", - "Rev": "d50e5c9283da469398d84078519de569f617be6f" - }, - { - "ImportPath": "github.com/vbatts/tar-split/tar/asm", - "Comment": "v0.9.11-1-gd50e5c9", - "Rev": "d50e5c9283da469398d84078519de569f617be6f" - }, - { - "ImportPath": "github.com/vbatts/tar-split/tar/storage", - "Comment": 
"v0.9.11-1-gd50e5c9", - "Rev": "d50e5c9283da469398d84078519de569f617be6f" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "b2ed34f6fc8d65cc6a090fb87692ea6b1162fddd" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d..00000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d684..00000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index a1623ec0..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index 3187f6d3..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go deleted file mode 100644 index aad646ab..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go +++ /dev/null @@ -1,61 +0,0 @@ -package logstash - -import ( - "encoding/json" - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Formatter generates json in logstash format. -// Logstash site: http://logstash.net/ -type LogstashFormatter struct { - Type string // if not empty use for logstash type field. 
- - // TimestampFormat sets the format used for timestamps. - TimestampFormat string -} - -func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { - fields := make(logrus.Fields) - for k, v := range entry.Data { - fields[k] = v - } - - fields["@version"] = 1 - - if f.TimestampFormat == "" { - f.TimestampFormat = logrus.DefaultTimestampFormat - } - - fields["@timestamp"] = entry.Time.Format(f.TimestampFormat) - - // set message field - v, ok := entry.Data["message"] - if ok { - fields["fields.message"] = v - } - fields["message"] = entry.Message - - // set level field - v, ok = entry.Data["level"] - if ok { - fields["fields.level"] = v - } - fields["level"] = entry.Level.String() - - // set type field - if f.Type != "" { - v, ok = entry.Data["type"] - if ok { - fields["fields.type"] = v - } - fields["type"] = f.Type - } - - serialized, err := json.Marshal(fields) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index 066704b3..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index c59f331d..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build !windows,!nacl,!plan9 - -package logrus_syslog - -import ( - "fmt" - "github.com/Sirupsen/logrus" - "log/syslog" - "os" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. 
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3e70bf7b..00000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete deleted file mode 100644 index 21a232f1..00000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -: ${PROG:=$(basename ${BASH_SOURCE})} - -_cli_bash_autocomplete() { - local cur opts base - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - } - - complete -F _cli_bash_autocomplete $PROG diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete deleted file mode 100644 index 5430a18f..00000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete +++ /dev/null @@ -1,5 +0,0 @@ -autoload -U compinit && compinit -autoload -U bashcompinit && bashcompinit - -script_dir=$(dirname $0) -source ${script_dir}/bash_autocomplete diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json deleted file mode 100644 index 16100e21..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.4.2", - "Packages": [ - "./..." 
- ], - "Deps": [ - { - "ImportPath": "golang.org/x/oauth2", - "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" - }, - { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" - }, - { - "ImportPath": "google.golang.org/api/storage/v1", - "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" - }, - { - "ImportPath": "google.golang.org/cloud", - "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" - }, - { - "ImportPath": "google.golang.org/api", - "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" - }, - { - "ImportPath": "google.golang.org/grpc", - "Rev": "91c8b79535eb6045d70ec671d302213f88a3ab95" - }, - { - "ImportPath": "github.com/bradfitz/http2", - "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f" - }, - { - "ImportPath": "github.com/golang/protobuf", - "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644" - }, - { - "ImportPath": "google.golang.org/cloud/storage", - "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" - }, - { - "ImportPath": "github.com/AdRoll/goamz/aws", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" - }, - { - "ImportPath": "github.com/AdRoll/goamz/cloudfront", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" - }, - { - "ImportPath": "github.com/AdRoll/goamz/s3", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" - }, - { - "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, - { - "ImportPath": "github.com/bugsnag/bugsnag-go", - "Comment": "v1.0.2-5-gb1d1530", - "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" - }, - { - "ImportPath": "github.com/bugsnag/osext", - "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" - }, - { - "ImportPath": "github.com/bugsnag/panicwrap", - "Rev": "e2c28503fcd0675329da73bf48b33404db873782" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/common", - "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" - }, - { - "ImportPath": "github.com/garyburd/redigo/internal", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/garyburd/redigo/redis", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" - }, - { - "ImportPath": "github.com/gorilla/handlers", - "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" - }, - { - "ImportPath": "github.com/inconshreveable/mousetrap", - "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - }, - { - "ImportPath": "github.com/mitchellh/mapstructure", - "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" - }, - { - "ImportPath": "github.com/ncw/swift", - "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" - }, - { - "ImportPath": "github.com/noahdesu/go-ceph/rados", - "Comment": "v.0.3.0-29-gb15639c", - "Rev": "b15639c44c05368348355229070361395d9152ee" - }, - { - "ImportPath": "github.com/spf13/cobra", - "Rev": "312092086bed4968099259622145a0c9ae280064" - }, - { - "ImportPath": "github.com/spf13/pflag", - "Rev": 
"5644820622454e71517561946e3d94b9f9db6842" - }, - { - "ImportPath": "github.com/stevvooe/resumable", - "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" - }, - { - "ImportPath": "github.com/yvasiyarov/go-metrics", - "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" - }, - { - "ImportPath": "github.com/yvasiyarov/gorelic", - "Comment": "v0.0.6-8-ga9bba5b", - "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" - }, - { - "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", - "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" - }, - { - "ImportPath": "golang.org/x/crypto/bcrypt", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/crypto/blowfish", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" - }, - { - "ImportPath": "golang.org/x/net/trace", - "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme deleted file mode 100644 index 4cdaa53d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go deleted file mode 100644 index 49426a88..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io" - "log" - "os" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/version" -) - -var ( - algorithm = digest.Canonical - showVersion bool -) - -type job struct { - name string - reader io.Reader -} - -func init() { - flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") - flag.Var(&algorithm, "algorithm", "select the digest algorithm") - flag.BoolVar(&showVersion, "version", false, "show the version and exit") - - log.SetFlags(0) - log.SetPrefix(os.Args[0] + ": ") -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) - fmt.Fprintf(os.Stderr, ` -Calculate the digest of one or more input files, emitting the result -to standard out. If no files are provided, the digest of stdin will -be calculated. 
- -`) - flag.PrintDefaults() -} - -func unsupported() { - log.Fatalf("unsupported digest algorithm: %v", algorithm) -} - -func main() { - var jobs []job - - flag.Usage = usage - flag.Parse() - if showVersion { - version.PrintVersion() - return - } - - var fail bool // if we fail on one item, foul the exit code - if flag.NArg() > 0 { - for _, path := range flag.Args() { - fp, err := os.Open(path) - - if err != nil { - log.Printf("%s: %v", path, err) - fail = true - continue - } - defer fp.Close() - - jobs = append(jobs, job{name: path, reader: fp}) - } - } else { - // just read stdin - jobs = append(jobs, job{name: "-", reader: os.Stdin}) - } - - digestFn := algorithm.FromReader - - if !algorithm.Available() { - unsupported() - } - - for _, job := range jobs { - dgst, err := digestFn(job.reader) - if err != nil { - log.Printf("%s: %v", job.name, err) - fail = true - continue - } - - fmt.Printf("%v\t%s\n", dgst, job.name) - } - - if fail { - os.Exit(1) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go deleted file mode 100644 index e9cbc42a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// registry-api-descriptor-template uses the APIDescriptor defined in the -// api/v2 package to execute templates passed to the command line. -// -// For example, to generate a new API specification, one would execute the -// following command from the repo root: -// -// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md -// -// The templates are passed in the api/v2.APIDescriptor object. Please see the -// package documentation for fields available on that object. The template -// syntax is from Go's standard library text/template package. For information -// on Go's template syntax, please see golang.org/pkg/text/template. -package main - -import ( - "log" - "net/http" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" -) - -var spaceRegex = regexp.MustCompile(`\n\s*`) - -func main() { - - if len(os.Args) != 2 { - log.Fatalln("please specify a template to execute.") - } - - path := os.Args[1] - filename := filepath.Base(path) - - funcMap := template.FuncMap{ - "removenewlines": func(s string) string { - return spaceRegex.ReplaceAllString(s, " ") - }, - "statustext": http.StatusText, - "prettygorilla": prettyGorillaMuxPath, - } - - tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) - - data := struct { - RouteDescriptors []v2.RouteDescriptor - ErrorDescriptors []errcode.ErrorDescriptor - }{ - RouteDescriptors: v2.APIDescriptor.RouteDescriptors, - ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), - // The following are part of the specification but provided by errcode default. - errcode.ErrorCodeUnauthorized.Descriptor(), - errcode.ErrorCodeDenied.Descriptor(), - errcode.ErrorCodeUnsupported.Descriptor()), - } - - if err := tmpl.Execute(os.Stdout, data); err != nil { - log.Fatalln(err) - } -} - -// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux -// route string, making it suitable for documentation. -func prettyGorillaMuxPath(s string) string { - // Stateful parser that removes regular expressions from gorilla - // routes. 
It correctly handles balanced bracket pairs. - - var output string - var label string - var level int - -start: - if s[0] == '{' { - s = s[1:] - level++ - goto capture - } - - output += string(s[0]) - s = s[1:] - - goto end -capture: - switch s[0] { - case '{': - level++ - case '}': - level-- - - if level == 0 { - s = s[1:] - goto label - } - case ':': - s = s[1:] - goto skip - default: - label += string(s[0]) - } - s = s[1:] - goto capture -skip: - switch s[0] { - case '{': - level++ - case '}': - level-- - } - s = s[1:] - - if level == 0 { - goto label - } - - goto skip -label: - if label != "" { - output += "<" + label + ">" - label = "" - } -end: - if s != "" { - goto start - } - - return output - -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml deleted file mode 100644 index 7a274ea5..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development -storage: - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry-cache - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-8082 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true -proxy: - remoteurl: https://registry-1.docker.io - username: username - password: password -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml deleted file mode 100644 index b6438be5..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml +++ /dev/null @@ -1,66 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com -storage: - delete: - enabled: true - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-5003 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml deleted file mode 100644 index b5700e19..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 0.1 -log: - fields: - service: registry -storage: - cache: - blobdescriptor: inmemory - filesystem: - rootdirectory: /var/lib/registry -http: - addr: :5000 - headers: - X-Content-Type-Options: [nosniff] -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go deleted file mode 100644 index 603a44a5..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - _ "net/http/pprof" - - "github.com/docker/distribution/registry" - _ "github.com/docker/distribution/registry/auth/htpasswd" - _ "github.com/docker/distribution/registry/auth/silly" - _ "github.com/docker/distribution/registry/auth/token" - _ "github.com/docker/distribution/registry/proxy" - _ "github.com/docker/distribution/registry/storage/driver/azure" - _ "github.com/docker/distribution/registry/storage/driver/filesystem" - _ "github.com/docker/distribution/registry/storage/driver/gcs" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" - _ "github.com/docker/distribution/registry/storage/driver/oss" - _ "github.com/docker/distribution/registry/storage/driver/s3" - _ "github.com/docker/distribution/registry/storage/driver/swift" -) - -func main() { - registry.Cmd.Execute() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go deleted file mode 100644 index e7ea770a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build include_rados - -package main - -import _ "github.com/docker/distribution/registry/storage/driver/rados" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go deleted file mode 100644 index 3dff32f8..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go +++ /dev/null @@ -1,571 +0,0 @@ -package configuration - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strings" - "time" -) - -// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and -// optionally modified by environment variables. -// -// Note that yaml field names should never include _ characters, since this is the separator used -// in environment variable names. -type Configuration struct { - // Version is the version which defines the format of the rest of the configuration - Version Version `yaml:"version"` - - // Log supports setting various parameters related to the logging - // subsystem. - Log struct { - // Level is the granularity at which registry operations are logged. - Level Loglevel `yaml:"level"` - - // Formatter overrides the default formatter with another. Options - // include "text", "json" and "logstash". 
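The deleted cmd/registry/main.go above pulls in every storage and auth driver with blank imports, relying on each package's init function to register itself. A self-contained sketch of that side-effect registration pattern (the registry's real registration API is not shown in this diff, so the names here are illustrative):

```go
package main

import "fmt"

// drivers plays the role of the registry's internal driver table.
var drivers = map[string]func() string{}

// Register is what a driver package's init() would call.
func Register(name string, factory func() string) { drivers[name] = factory }

// In the real code this init lives in a separate, blank-imported package.
func init() { Register("inmemory", func() string { return "inmemory driver" }) }

func main() {
	for name := range drivers {
		fmt.Println("registered:", name)
	}
}
```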
- Formatter string `yaml:"formatter,omitempty"` - - // Fields allows users to specify static string fields to include in - // the logger context. - Fields map[string]interface{} `yaml:"fields,omitempty"` - - // Hooks allows users to configure the log hooks, enabling the - // subsequent handling behavior that runs when log messages at the defined levels are emitted. - Hooks []LogHook `yaml:"hooks,omitempty"` - } - - // Loglevel is the level at which registry operations are logged. This is - // deprecated. Please use Log.Level in the future. - Loglevel Loglevel `yaml:"loglevel,omitempty"` - - // Storage is the configuration for the registry's storage driver - Storage Storage `yaml:"storage"` - - // Auth allows configuration of various authorization methods that may be - // used to gate requests. - Auth Auth `yaml:"auth,omitempty"` - - // Middleware lists all middlewares to be used by the registry. - Middleware map[string][]Middleware `yaml:"middleware,omitempty"` - - // Reporting is the configuration for error reporting - Reporting Reporting `yaml:"reporting,omitempty"` - - // HTTP contains configuration parameters for the registry's http - // interface. - HTTP struct { - // Addr specifies the bind address for the registry instance. - Addr string `yaml:"addr,omitempty"` - - // Net specifies the net portion of the bind address. A default empty value means tcp. - Net string `yaml:"net,omitempty"` - - // Host specifies an externally-reachable address for the registry, as a fully - // qualified URL. - Host string `yaml:"host,omitempty"` - - Prefix string `yaml:"prefix,omitempty"` - - // Secret specifies the secret key which HMAC tokens are created with. - Secret string `yaml:"secret,omitempty"` - - // TLS instructs the http server to listen with a TLS configuration. - // This only supports simple tls configuration with a cert and key. - // Mostly, this is useful for testing situations or simple deployments - // that require tls. If more complex configurations are required, use - // a proxy or make a proposal to add support here. - TLS struct { - // Certificate specifies the path to an x509 certificate file to - // be used for TLS. - Certificate string `yaml:"certificate,omitempty"` - - // Key specifies the path to the x509 key file, which should - // contain the private portion for the file specified in - // Certificate. - Key string `yaml:"key,omitempty"` - - // ClientCAs specifies the CA certs for client authentication. - // A file may contain multiple CA certificates encoded as PEM. - ClientCAs []string `yaml:"clientcas,omitempty"` - } `yaml:"tls,omitempty"` - - // Headers is a set of headers to include in HTTP responses. A common - // use case for this would be security headers such as - // Strict-Transport-Security. The map keys are the header names, and - // the values are the associated header payloads. - Headers http.Header `yaml:"headers,omitempty"` - - // Debug configures the http debug interface, if specified. This can - // include services such as pprof, expvar and other data that should - // not be exposed externally. Left disabled by default. - Debug struct { - // Addr specifies the bind address for the debug server. - Addr string `yaml:"addr,omitempty"` - } `yaml:"debug,omitempty"` - } `yaml:"http,omitempty"` - - // Notifications specifies configuration about various endpoints to which - // registry events are dispatched. - Notifications Notifications `yaml:"notifications,omitempty"`
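The Redis section that follows carries connection and pool settings. A hedged sketch of how those values map onto the redigo client pinned in the Godeps above (github.com/garyburd/redigo); an illustration, not the registry's actual wiring:

```go
package main

import (
	"time"

	"github.com/garyburd/redigo/redis"
)

// newPool mirrors the Pool and timeout fields of the Redis config section below.
func newPool(addr string, dial, read, write, idle time.Duration) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     16, // Pool.MaxIdle
		MaxActive:   64, // Pool.MaxActive
		IdleTimeout: idle,
		Dial: func() (redis.Conn, error) {
			return redis.DialTimeout("tcp", addr, dial, read, write)
		},
	}
}

func main() {
	// Values taken from the config-dev.yml earlier in this diff.
	p := newPool("localhost:6379", 10*time.Millisecond, 10*time.Millisecond,
		10*time.Millisecond, 300*time.Second)
	defer p.Close()
}
```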
- // Redis configures the redis pool available to the registry webapp. - Redis struct { - // Addr specifies the redis instance available to the application. - Addr string `yaml:"addr,omitempty"` - - // Password string to use when making a connection. - Password string `yaml:"password,omitempty"` - - // DB specifies the database to connect to on the redis instance. - DB int `yaml:"db,omitempty"` - - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data - - // Pool configures the behavior of the redis connection pool. - Pool struct { - // MaxIdle sets the maximum number of idle connections. - MaxIdle int `yaml:"maxidle,omitempty"` - - // MaxActive sets the maximum number of connections that should be - // opened before blocking a connection request. - MaxActive int `yaml:"maxactive,omitempty"` - - // IdleTimeout sets the amount of time to wait before closing - // inactive connections. - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - } `yaml:"redis,omitempty"` - - Health Health `yaml:"health,omitempty"` - - Proxy Proxy `yaml:"proxy,omitempty"` -} - -// LogHook is composed of hook Level and Type. -// Once hooks are configured, the configured handling runs automatically when a -// log message at one of the defined levels is emitted. -// Example: a hook can send an email notification when an error is logged in the app. -type LogHook struct { - // Disabled lets the user enable or disable the hook. - Disabled bool `yaml:"disabled,omitempty"` - - // Type selects which type of hook handler to use. - Type string `yaml:"type,omitempty"` - - // Levels sets which log levels will trigger the hook. - Levels []string `yaml:"levels,omitempty"` - - // MailOptions allows the user to configure email parameters. - MailOptions MailOptions `yaml:"options,omitempty"` -} - -// MailOptions provides the configuration section for a specific handler. -type MailOptions struct { - SMTP struct { - // Addr defines the smtp host address - Addr string `yaml:"addr,omitempty"` - - // Username defines the user name for the smtp host - Username string `yaml:"username,omitempty"` - - // Password defines the password of the login user - Password string `yaml:"password,omitempty"` - - // Insecure defines whether smtp login skips certificate verification. - Insecure bool `yaml:"insecure,omitempty"` - } `yaml:"smtp,omitempty"` - - // From defines the mail sending address - From string `yaml:"from,omitempty"` - - // To defines the mail receiving addresses - To []string `yaml:"to,omitempty"` -} - -// FileChecker is a type of entry in the health section for checking files. -type FileChecker struct { - // Interval is the duration in between checks - Interval time.Duration `yaml:"interval,omitempty"` - // File is the path to check - File string `yaml:"file,omitempty"` - // Threshold is the number of times a check must fail to trigger an - // unhealthy state - Threshold int `yaml:"threshold,omitempty"` -}
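FileChecker describes a poll-style check driven by Interval and Threshold. A standalone reading of those semantics (the polarity of the check, where a stat failure counts as unhealthy, is an assumption; the registry's health package is not part of this diff):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// watchFile polls path every interval and reports unhealthy after
// threshold consecutive failures, as the FileChecker fields suggest.
func watchFile(path string, interval time.Duration, threshold int, done <-chan struct{}) {
	failures := 0
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-done:
			return
		case <-t.C:
			if _, err := os.Stat(path); err != nil {
				failures++ // assumed polarity: missing file = failed check
			} else {
				failures = 0
			}
			if failures >= threshold {
				fmt.Println("unhealthy:", path)
			}
		}
	}
}

func main() {
	done := make(chan struct{})
	go watchFile("/var/lib/registry/health-ok", 10*time.Millisecond, 3, done)
	time.Sleep(60 * time.Millisecond)
	close(done)
}
```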
-// HTTPChecker is a type of entry in the health section for checking HTTP URIs. -type HTTPChecker struct { - // Timeout is the duration to wait before timing out the HTTP request - Timeout time.Duration `yaml:"timeout,omitempty"` - // StatusCode is the expected status code - StatusCode int - // Interval is the duration in between checks - Interval time.Duration `yaml:"interval,omitempty"` - // URI is the HTTP URI to check - URI string `yaml:"uri,omitempty"` - // Headers lists static headers that should be added to all requests - Headers http.Header `yaml:"headers"` - // Threshold is the number of times a check must fail to trigger an - // unhealthy state - Threshold int `yaml:"threshold,omitempty"` -} - -// TCPChecker is a type of entry in the health section for checking TCP servers. -type TCPChecker struct { - // Timeout is the duration to wait before timing out the TCP connection - Timeout time.Duration `yaml:"timeout,omitempty"` - // Interval is the duration in between checks - Interval time.Duration `yaml:"interval,omitempty"` - // Addr is the TCP address to check - Addr string `yaml:"addr,omitempty"` - // Threshold is the number of times a check must fail to trigger an - // unhealthy state - Threshold int `yaml:"threshold,omitempty"` -} - -// Health provides the configuration section for health checks. -type Health struct { - // FileCheckers is a list of paths to check - FileCheckers []FileChecker `yaml:"file,omitempty"` - // HTTPCheckers is a list of URIs to check - HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` - // TCPCheckers is a list of TCP addresses to check - TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` - // StorageDriver configures a health check on the configured storage - // driver - StorageDriver struct { - // Enabled turns on the health check for the storage driver - Enabled bool `yaml:"enabled,omitempty"` - // Interval is the duration in between checks - Interval time.Duration `yaml:"interval,omitempty"` - // Threshold is the number of times a check must fail to trigger an - // unhealthy state - Threshold int `yaml:"threshold,omitempty"` - } `yaml:"storagedriver,omitempty"` -} - -// v0_1Configuration is a Version 0.1 Configuration struct -// This is currently aliased to Configuration, as it is the current version -type v0_1Configuration Configuration - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints -func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { - var versionString string - err := unmarshal(&versionString) - if err != nil { - return err - } - - newVersion := Version(versionString) - if _, err := newVersion.major(); err != nil { - return err - } - - if _, err := newVersion.minor(); err != nil { - return err - } - - *version = newVersion - return nil -} - -// CurrentVersion is the most recent Version that can be parsed -var CurrentVersion = MajorMinorVersion(0, 1) - -// Loglevel is the level at which operations are logged -// This can be error, warn, info, or debug -type Loglevel string - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a -// valid loglevel -func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { - var loglevelString string - err := unmarshal(&loglevelString) - if err != nil { - return err - } - - loglevelString = strings.ToLower(loglevelString) - switch loglevelString { - case "error", "warn", "info", "debug": - default: - return fmt.Errorf("Invalid
loglevel %s Must be one of [error, warn, info, debug]", loglevelString) - } - - *loglevel = Loglevel(loglevelString) - return nil -} - -// Parameters defines a key-value parameters mapping -type Parameters map[string]interface{} - -// Storage defines the configuration for registry object storage -type Storage map[string]Parameters - -// Type returns the storage driver type, such as filesystem or s3 -func (storage Storage) Type() string { - var storageType []string - - // Return only key in this map - for k := range storage { - switch k { - case "maintenance": - // allow configuration of maintenance - case "cache": - // allow configuration of caching - case "delete": - // allow configuration of delete - case "redirect": - // allow configuration of redirect - default: - storageType = append(storageType, k) - } - } - if len(storageType) > 1 { - panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", ")) - } - if len(storageType) == 1 { - return storageType[0] - } - return "" -} - -// Parameters returns the Parameters map for a Storage configuration -func (storage Storage) Parameters() Parameters { - return storage[storage.Type()] -} - -// setParameter changes the parameter at the provided key to the new value -func (storage Storage) setParameter(key string, value interface{}) { - storage[storage.Type()][key] = value -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters -func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { - var storageMap map[string]Parameters - err := unmarshal(&storageMap) - if err == nil { - if len(storageMap) > 1 { - types := make([]string, 0, len(storageMap)) - for k := range storageMap { - switch k { - case "maintenance": - // allow for configuration of maintenance - case "cache": - // allow configuration of caching - case "delete": - // allow configuration of delete - case "redirect": - // allow configuration of redirect - default: - types = append(types, k) - } - } - - if len(types) > 1 { - return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) - } - } - *storage = storageMap - return nil - } - - var storageType string - err = unmarshal(&storageType) - if err == nil { - *storage = Storage{storageType: Parameters{}} - return nil - } - - return err -} - -// MarshalYAML implements the yaml.Marshaler interface -func (storage Storage) MarshalYAML() (interface{}, error) { - if storage.Parameters() == nil { - return storage.Type(), nil - } - return map[string]Parameters(storage), nil -} - -// Auth defines the configuration for registry authorization. 
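Storage.Type() above treats the storage section as a map whose single non-reserved key names the driver, and Auth just below reuses the same convention. A runnable distillation of that single-key pattern:

```go
package main

import "fmt"

type parameters map[string]interface{}
type storage map[string]parameters

// Keys that tune behavior rather than naming a driver, as in Type() above.
var reserved = map[string]bool{"maintenance": true, "cache": true, "delete": true, "redirect": true}

func (s storage) driverType() string {
	for k := range s {
		if !reserved[k] {
			return k
		}
	}
	return ""
}

func main() {
	s := storage{
		"filesystem": {"rootdirectory": "/var/lib/registry"},
		"cache":      {"blobdescriptor": "inmemory"},
	}
	fmt.Println(s.driverType()) // filesystem
}
```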
-type Auth map[string]Parameters - -// Type returns the storage driver type, such as filesystem or s3 -func (auth Auth) Type() string { - // Return only key in this map - for k := range auth { - return k - } - return "" -} - -// Parameters returns the Parameters map for an Auth configuration -func (auth Auth) Parameters() Parameters { - return auth[auth.Type()] -} - -// setParameter changes the parameter at the provided key to the new value -func (auth Auth) setParameter(key string, value interface{}) { - auth[auth.Type()][key] = value -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters -func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]Parameters - err := unmarshal(&m) - if err == nil { - if len(m) > 1 { - types := make([]string, 0, len(m)) - for k := range m { - types = append(types, k) - } - - // TODO(stevvooe): May want to change this slightly for - // authorization to allow multiple challenges. - return fmt.Errorf("must provide exactly one type. Provided: %v", types) - - } - *auth = m - return nil - } - - var authType string - err = unmarshal(&authType) - if err == nil { - *auth = Auth{authType: Parameters{}} - return nil - } - - return err -} - -// MarshalYAML implements the yaml.Marshaler interface -func (auth Auth) MarshalYAML() (interface{}, error) { - if auth.Parameters() == nil { - return auth.Type(), nil - } - return map[string]Parameters(auth), nil -} - -// Notifications configures multiple http endpoints. -type Notifications struct { - // Endpoints is a list of http configurations for endpoints that - // respond to webhook notifications. In the future, we may allow other - // kinds of endpoints, such as external queues. - Endpoints []Endpoint `yaml:"endpoints,omitempty"` -} - -// Endpoint describes the configuration of an http webhook notification -// endpoint. -type Endpoint struct { - Name string `yaml:"name"` // identifies the endpoint in the registry instance. - Disabled bool `yaml:"disabled"` // disables the endpoint - URL string `yaml:"url"` // post url for the endpoint. - Headers http.Header `yaml:"headers"` // static headers that should be added to all requests - Timeout time.Duration `yaml:"timeout"` // HTTP timeout - Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure - Backoff time.Duration `yaml:"backoff"` // backoff duration -} - -// Reporting defines error reporting methods. -type Reporting struct { - // Bugsnag configures error reporting for Bugsnag (bugsnag.com). - Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` - // NewRelic configures error reporting for NewRelic (newrelic.com) - NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` -} - -// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). -type BugsnagReporting struct { - // APIKey is the Bugsnag api key. - APIKey string `yaml:"apikey,omitempty"` - // ReleaseStage tracks where the registry is deployed. - // Examples: production, staging, development - ReleaseStage string `yaml:"releasestage,omitempty"` - // Endpoint is used for specifying an enterprise Bugsnag endpoint. 
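The notification Endpoint defined in this hunk pairs a failure Threshold with a Backoff duration, a simple circuit-breaker shape. A minimal illustration under those assumed semantics (the registry's actual notifications code is not part of this diff); the Bugsnag reporting fields resume below:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// deliver retries send, sleeping backoff between attempts and giving up
// once threshold consecutive failures accumulate.
func deliver(send func() error, threshold int, backoff time.Duration) error {
	failures := 0
	for {
		if err := send(); err == nil {
			return nil
		}
		failures++
		if failures >= threshold {
			return errors.New("endpoint unhealthy: threshold reached")
		}
		time.Sleep(backoff)
	}
}

func main() {
	attempts := 0
	err := deliver(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 10, time.Millisecond)
	fmt.Println(err, "after", attempts, "attempts")
}
```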
- Endpoint string `yaml:"endpoint,omitempty"` -} - -// NewRelicReporting configures error reporting for NewRelic (newrelic.com) -type NewRelicReporting struct { - // LicenseKey is the NewRelic user license key - LicenseKey string `yaml:"licensekey,omitempty"` - // Name is the component name of the registry in NewRelic - Name string `yaml:"name,omitempty"` - // Verbose configures debug output to STDOUT - Verbose bool `yaml:"verbose,omitempty"` -} - -// Middleware configures named middlewares to be applied at injection points. -type Middleware struct { - // Name the middleware registers itself as - Name string `yaml:"name"` - // Flag to disable middleware easily - Disabled bool `yaml:"disabled,omitempty"` - // Map of parameters that will be passed to the middleware's initialization function - Options Parameters `yaml:"options"` -} - -// Proxy configures the registry as a pull through cache -type Proxy struct { - // RemoteURL is the URL of the remote registry - RemoteURL string `yaml:"remoteurl"` - - // Username of the hub user - Username string `yaml:"username"` - - // Password of the hub user - Password string `yaml:"password"` -} - -// Parse parses an input configuration yaml document into a Configuration struct -// This should generally be capable of handling old configuration format versions -// -// Environment variables may be used to override configuration parameters other than version, -// following the scheme below: -// Configuration.Abc may be replaced by the value of REGISTRY_ABC, -// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth -func Parse(rd io.Reader) (*Configuration, error) { - in, err := ioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - p := NewParser("registry", []VersionedParseInfo{ - { - Version: MajorMinorVersion(0, 1), - ParseAs: reflect.TypeOf(v0_1Configuration{}), - ConversionFunc: func(c interface{}) (interface{}, error) { - if v0_1, ok := c.(*v0_1Configuration); ok { - if v0_1.Loglevel == Loglevel("") { - v0_1.Loglevel = Loglevel("info") - } - if v0_1.Storage.Type() == "" { - return nil, fmt.Errorf("No storage configuration provided") - } - return (*Configuration)(v0_1), nil - } - return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) - }, - }, - }) - - config := new(Configuration) - err = p.Parse(in, config) - if err != nil { - return nil, err - } - - return config, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go deleted file mode 100644 index 8b81dd5d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go +++ /dev/null @@ -1,283 +0,0 @@ -package configuration - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "gopkg.in/yaml.v2" -) - -// Version is a major/minor version pair of the form Major.Minor -// Major version upgrades indicate structure or type changes -// Minor version upgrades should be strictly additive -type Version string - -// MajorMinorVersion constructs a Version from its Major and Minor components -func MajorMinorVersion(major, minor uint) Version { - return Version(fmt.Sprintf("%d.%d", major, minor)) -} - -func (version Version) major() (uint, error) { - majorPart := strings.Split(string(version), ".")[0] - major, err := strconv.ParseUint(majorPart, 10, 0) - return uint(major), err -} - -// Major returns the major version portion of a Version -func (version 
Version) Major() uint { - major, _ := version.major() - return major -} - -func (version Version) minor() (uint, error) { - minorPart := strings.Split(string(version), ".")[1] - minor, err := strconv.ParseUint(minorPart, 10, 0) - return uint(minor), err -} - -// Minor returns the minor version portion of a Version -func (version Version) Minor() uint { - minor, _ := version.minor() - return minor -} - -// VersionedParseInfo defines how a specific version of a configuration should -// be parsed into the current version -type VersionedParseInfo struct { - // Version is the version which this parsing information relates to - Version Version - // ParseAs defines the type which a configuration file of this version - // should be parsed into - ParseAs reflect.Type - // ConversionFunc defines a method for converting the parsed configuration - // (of type ParseAs) into the current configuration version - // Note: this method signature is very unclear with the absence of generics - ConversionFunc func(interface{}) (interface{}, error) -} - -type envVar struct { - name string - value string -} - -type envVars []envVar - -func (a envVars) Len() int { return len(a) } -func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } - -// Parser can be used to parse a configuration file and environment of a defined -// version into a unified output structure -type Parser struct { - prefix string - mapping map[Version]VersionedParseInfo - env envVars -} - -// NewParser returns a *Parser with the given environment prefix which handles -// versioned configurations which match the given parseInfos -func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { - p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} - - for _, parseInfo := range parseInfos { - p.mapping[parseInfo.Version] = parseInfo - } - - for _, env := range os.Environ() { - envParts := strings.SplitN(env, "=", 2) - p.env = append(p.env, envVar{envParts[0], envParts[1]}) - } - - // We must sort the environment variables lexically by name so that - // more specific variables are applied before less specific ones - // (i.e. REGISTRY_STORAGE before - // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a - // lot simpler and easier to get right than unmarshalling map entries - // into temporaries and merging with the existing entry. 
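This lexical ordering guarantee, that REGISTRY_STORAGE sorts before REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY, is what the sort.Sort call just below relies on. A quick standalone check of the property:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	names := []string{
		"REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY",
		"REGISTRY_STORAGE",
		"REGISTRY_LOG_LEVEL",
	}
	sort.Strings(names)
	for _, n := range names {
		// Everything after the prefix becomes the config path, split on "_",
		// matching the overwriteFields walk driven by Parse below.
		fmt.Println(n, "->", strings.Split(strings.TrimPrefix(n, "REGISTRY_"), "_"))
	}
}
```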
- sort.Sort(p.env) - - return &p -} - -// Parse reads in the given []byte and environment and writes the resulting -// configuration into the input v -// -// Environment variables may be used to override configuration parameters other -// than version, following the scheme below: -// v.Abc may be replaced by the value of PREFIX_ABC, -// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth -func (p *Parser) Parse(in []byte, v interface{}) error { - var versionedStruct struct { - Version Version - } - - if err := yaml.Unmarshal(in, &versionedStruct); err != nil { - return err - } - - parseInfo, ok := p.mapping[versionedStruct.Version] - if !ok { - return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) - } - - parseAs := reflect.New(parseInfo.ParseAs) - err := yaml.Unmarshal(in, parseAs.Interface()) - if err != nil { - return err - } - - for _, envVar := range p.env { - pathStr := envVar.name - if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { - path := strings.Split(pathStr, "_") - - err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) - if err != nil { - return err - } - } - } - - c, err := parseInfo.ConversionFunc(parseAs.Interface()) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) - return nil -} - -// overwriteFields replaces configuration values with alternate values specified -// through the environment. Precondition: an empty path slice must never be -// passed in. -func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - panic("encountered nil pointer while handling environment variable " + fullpath) - } - v = reflect.Indirect(v) - } - switch v.Kind() { - case reflect.Struct: - return p.overwriteStruct(v, fullpath, path, payload) - case reflect.Map: - return p.overwriteMap(v, fullpath, path, payload) - case reflect.Interface: - if v.NumMethod() == 0 { - if !v.IsNil() { - return p.overwriteFields(v.Elem(), fullpath, path, payload) - } - // Interface was empty; create an implicit map - var template map[string]interface{} - wrappedV := reflect.MakeMap(reflect.TypeOf(template)) - v.Set(wrappedV) - return p.overwriteMap(wrappedV, fullpath, path, payload) - } - } - return nil -} - -func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { - // Generate case-insensitive map of struct fields - byUpperCase := make(map[string]int) - for i := 0; i < v.NumField(); i++ { - sf := v.Type().Field(i) - upper := strings.ToUpper(sf.Name) - if _, present := byUpperCase[upper]; present { - panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) - } - byUpperCase[upper] = i - } - - fieldIndex, present := byUpperCase[path[0]] - if !present { - logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) - return nil - } - field := v.Field(fieldIndex) - sf := v.Type().Field(fieldIndex) - - if len(path) == 1 { - // Env var specifies this field directly - fieldVal := reflect.New(sf.Type) - err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) - if err != nil { - return err - } - field.Set(reflect.Indirect(fieldVal)) - return nil - } - - // If the field is nil, must create an object - switch sf.Type.Kind() { - case reflect.Map: - if field.IsNil() { - field.Set(reflect.MakeMap(sf.Type)) - } - case reflect.Ptr: - if field.IsNil() { - field.Set(reflect.New(sf.Type)) - } - } - - err := p.overwriteFields(field, 
fullpath, path[1:], payload) - if err != nil { - return err - } - - return nil -} - -func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { - if m.Type().Key().Kind() != reflect.String { - // non-string keys unsupported - logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) - return nil - } - - if len(path) > 1 { - // If a matching key exists, get its value and continue the - // overwriting process. - for _, k := range m.MapKeys() { - if strings.ToUpper(k.String()) == path[0] { - mapValue := m.MapIndex(k) - // If the existing value is nil, we want to - // recreate it instead of using this value. - if (mapValue.Kind() == reflect.Ptr || - mapValue.Kind() == reflect.Interface || - mapValue.Kind() == reflect.Map) && - mapValue.IsNil() { - break - } - return p.overwriteFields(mapValue, fullpath, path[1:], payload) - } - } - } - - // (Re)create this key - var mapValue reflect.Value - if m.Type().Elem().Kind() == reflect.Map { - mapValue = reflect.MakeMap(m.Type().Elem()) - } else { - mapValue = reflect.New(m.Type().Elem()) - } - if len(path) > 1 { - err := p.overwriteFields(mapValue, fullpath, path[1:], payload) - if err != nil { - return err - } - } else { - err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) - if err != nil { - return err - } - } - - m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD deleted file mode 100644 index 29f6bae1..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD +++ /dev/null @@ -1,36 +0,0 @@ -# Apache HTTPd sample for Registry v1, v2 and mirror - -3 containers involved - -* Docker Registry v1 (registry 0.9.1) -* Docker Registry v2 (registry 2.0.0) -* Docker Registry v1 in mirror mode - -HTTP for mirror and HTTPS for v1 & v2 - -* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode - * https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode - -## 3 Docker containers should be started - -* Docker Registry 1.0 in Mirror mode : port 5001 - * Docker Registry 1.0 in Hosting mode : port 5000 - * Docker Registry 2.0 in Hosting mode : port 5002 - -### Registry v1 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1 - -### Mirror - - docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ - -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1 - -### Registry v2 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2 - -# For Hosting mode access - -* users should have an account (valid-user) to be able to fetch images -* only users using the account docker-deployer will be allowed to push images diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf deleted file mode 100644 index 3300a7c0..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf +++ /dev/null @@ -1,127 +0,0 @@ -# -# Sample Apache 2.x configuration where: -# - - - - ServerName
registry.example.com - ServerAlias www.registry.example.com - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd error messages) - ProxyPass /error/ ! - - ProxyPass /_ping http://localhost:5001/_ping - ProxyPassReverse /_ping http://localhost:5001/_ping - - ProxyPass /v1 http://localhost:5001/v1 - ProxyPassReverse /v1 http://localhost:5001/v1 - - # Logs - ErrorLog ${APACHE_LOG_DIR}/mirror_error_log - CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog - - - - - - - ServerName registry.example.com - ServerAlias www.registry.example.com - - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt - SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key - - # Higher Strength SSL Ciphers - SSLProtocol all -SSLv2 -SSLv3 -TLSv1 - SSLCipherSuite RC4-SHA:HIGH - SSLHonorCipherOrder on - - # Logs - ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log - CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog - - Header always set "Docker-Distribution-Api-Version" "registry/2.0" - Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" - RequestHeader set X-Forwarded-Proto "https" - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd error messages) - ProxyPass /error/ ! - - # - # Registry v1 - # - - ProxyPass /v1 http://localhost:5000/v1 - ProxyPassReverse /v1 http://localhost:5000/v1 - - ProxyPass /_ping http://localhost:5000/_ping - ProxyPassReverse /_ping http://localhost:5000/_ping - - # Authentication required for push - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authenticated users - - Require valid-user - - - # Write access to docker-deployer account only - - Require user docker-deployer - - - - - # Allow ping to run unauthenticated. - - Satisfy any - Allow from all - - - # Allow ping to run unauthenticated. - - Satisfy any - Allow from all - - - # - # Registry v2 - # - - ProxyPass /v2 http://localhost:5002/v2 - ProxyPassReverse /v2 http://localhost:5002/v2 - - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authenticated users - - Require valid-user - - - # Write access to docker-deployer only - - Require user docker-deployer - - - - - - - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh deleted file mode 100644 index d907cf5c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!
/bin/bash -# -# Ceph cluster setup in Circle CI -# - -set -x -set -e -set -u - -NODE=$(hostname) -CEPHDIR=/tmp/ceph - -mkdir cluster -pushd cluster - -# Install -retries=0 -until [ $retries -ge 5 ]; do - pip install ceph-deploy && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy install --release hammer $NODE && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy pkg --install librados-dev $NODE && break - retries=$[$retries+1] - sleep 30 -done - -echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts -ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" -cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys -ssh-keyscan $NODE >> ~/.ssh/known_hosts -ceph-deploy new $NODE - -cat >> ceph.conf < 74acc70fa106 - Removing intermediate container edb84c2b40cb - Successfully built 74acc70fa106 - - The command outputs its progress until it completes. - -4. Start your configuration with compose. - - $ docker-compose up - Recreating compose_registryv1_1... - Recreating compose_registryv2_1... - Recreating compose_nginx_1... - Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 - ... - - -5. In another terminal, display the running configuration. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 - 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 - aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 - -### Explore a bit - -1. Check for TLS on your `nginx` server. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -2. Tag the `v1` registry image. - - $ docker tag registry:latest localhost:5000/registry_one:latest - -3. Push it to localhost. - - $ docker push localhost:5000/registry_one:latest - - If you are using the 1.6 Docker client, this pushes the image to the `v2` registry. - -4. Use `curl` to list the image in the registry. - - $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 32777 (#0) - > GET /v2/registry1/tags/list HTTP/1.1 - > User-Agent: curl/7.36.0 - > Host: localhost:32777 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Tue, 14 Apr 2015 22:34:13 GMT - < Content-Length: 39 - < - {"name":"registry1","tags":["latest"]} - * Connection #0 to host localhost left intact - - This example refers to the specific port assigned to the 2.0 registry. You saw - this port earlier, when you used `docker ps` to show your running containers.
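If you would rather exercise the same v2 endpoint programmatically, the tags/list call takes only a few lines of Go. This is a minimal sketch, assuming the v2 registry's ephemeral port is 32777 as in the `docker ps` output above; substitute whatever port your host assigned:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// tagList mirrors the response body shown above:
// {"name":"registry1","tags":["latest"]}
type tagList struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

func main() {
	// 32777 is the ephemeral port from the example above; it will
	// differ on your machine.
	resp, err := http.Get("http://localhost:32777/v2/registry1/tags/list")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var tl tagList
	if err := json.NewDecoder(resp.Body).Decode(&tl); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %v\n", tl.Name, tl.Tags)
}
```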
- - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml deleted file mode 100644 index 5cd04858..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile deleted file mode 100644 index 2b252ec7..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM nginx:1.7 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d776..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf deleted file mode 100644 index 7b039a54..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf deleted file mode 100644 index 63cd180d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf +++ /dev/null @@ -1,27 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include 
/etc/nginx/conf.d/*.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf deleted file mode 100644 index 47ffd237..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - # To add basic authentication to v2 use auth_basic setting plus add_header - # auth_basic "registry.localhost"; - # auth_basic_user_file test.password; - # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile deleted file mode 100644 index 8cc504c3..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM debian:jessie - -MAINTAINER Docker Distribution Team - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - # For DIND - ca-certificates \ - curl \ - iptables \ - procps \ - e2fsprogs \ - xz-utils \ - # For build - build-essential \ - file \ - git \ - net-tools \ - && apt-get clean && rm -rf /var/lib/apt/lists/* - -# Install Docker -ENV VERSION 1.7.1 -RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ - && chmod +x /usr/local/bin/docker - -# Install DIND -RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/v1.8.1/hack/dind \ - && chmod +x /dind - -# Install bats -RUN cd /usr/local/src/ \ - && git clone https://github.com/sstephenson/bats.git \ - && cd bats \ - && ./install.sh /usr/local - -# Install docker-compose -RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ - && chmod +x /usr/local/bin/docker-compose - -RUN mkdir -p /go/src/github.com/docker/distribution -WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration - -VOLUME /var/lib/docker - -ENTRYPOINT ["/dind"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md deleted file mode 100644 index e5708501..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Docker Registry Integration Testing - -These 
integration tests cover interactions between the Docker daemon and the -registry server. All tests are run using the Docker CLI. - -The compose configuration is intended to set up a testing environment for Docker -using multiple registry configurations. These configurations include different -combinations of a v1 and v2 registry as well as TLS configurations. - -## Running inside of Docker -### Get integration container -The container image to run the integration tests will need to be pulled or built -locally. - -*Building locally* -``` -$ docker build -t distribution/docker-integration . -``` - -### Run script - -Invoke the tests within Docker through the `run.sh` script. - -``` -$ ./run.sh -``` - -Run with aufs driver and tmp volume -**NOTE: Using a volume will prevent multiple runs from needing to -re-pull images** -``` -$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh -``` - -### Example developer flow - -These tests are useful when developing both the registry and Docker -core. The following setup may be used to do integration -testing between development versions. - -Insert into your `.zshrc` or `.bashrc` - -``` -# /usr/lib/docker for Docker-in-Docker -# Set this directory to make each invocation run much faster, without -# the need to repull images. -export DOCKER_VOLUME=$HOME/.docker-test-volume - -# Use overlay for all Docker testing, try aufs if overlay not supported -export DOCKER_GRAPHDRIVER=overlay - -# Name this according to personal preference -function rdtest() { - if [ "$1" != "" ]; then - DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker - if [ ! -f $DOCKER_BINARY ]; then - current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` - echo "$DOCKER_BINARY does not exist" - echo "Current checked out docker version: $current_version" - echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" - return 1 - fi - fi - - $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh -} -``` - -Run with Docker release version -``` -$ rdtest -``` - -Run using local development version of docker -``` -$ cd $GOPATH/src/github.com/docker/docker -$ make binary -$ rdtest `cat VERSION` -``` - -## Running manually outside of Docker - -### Install Docker Compose - -[Docker Compose Installation Guide](https://docs.docker.com/compose/install/) - -### Start compose setup -``` -docker-compose up -``` - -### Install Certificates -The certificates must be installed in /etc/docker/certs.d in order to use TLS -client auth and use the CA certificate. -``` -sudo sh ./install_certs.sh -``` - -### Test with Docker -Tag an image as with any other private registry. Attempt to push the image. - -``` -docker pull hello-world -docker tag hello-world localhost:5440/hello-world -docker push localhost:5440/hello-world - -docker tag hello-world localhost:5441/hello-world -docker push localhost:5441/hello-world -# Perform login using user `testuser` and password `passpassword` -``` - -### Set /etc/hosts entry -Find the non-localhost IP address of the local machine - -### Run bats -Run the bats tests after updating /etc/hosts, installing the certificates, and -running the `docker-compose` script. -``` -bats -p .
-``` - -## Configurations - -Port | V2 | V1 | TLS | Authentication ---- | --- | --- | --- | --- -5000 | yes | yes | no | none -5001 | no | yes | no | none -5002 | yes | no | no | none -5011 | no | yes | yes | none -5440 | yes | yes | yes | none -5441 | yes | yes | yes | basic (testuser/passpassword) -5442 | yes | yes | yes | TLS client -5443 | yes | yes | yes | TLS client (no CA) -5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) -5445 | yes | yes | yes (no CA) | none -5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) -5447 | yes | yes | yes (no CA) | TLS client -5448 | yes | yes | yes (SSLv3) | none diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml deleted file mode 100644 index d664c7bd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml +++ /dev/null @@ -1,27 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - - "5001:5001" - - "5002:5002" - - "5011:5011" - - "5440:5440" - - "5441:5441" - - "5442:5442" - - "5443:5443" - - "5444:5444" - - "5445:5445" - - "5446:5446" - - "5447:5447" - - "5448:5448" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry:0.9.1 - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash deleted file mode 100644 index 60d96ae0..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash +++ /dev/null @@ -1,21 +0,0 @@ -# Start docker daemon -function start_daemon() { - # Drivers to use for Docker engines the tests are going to create. - STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} - EXEC_DRIVER=${EXEC_DRIVER:-native} - - docker --daemon --log-level=panic \ - --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & - DOCKER_PID=$! - - # Wait for it to become reachable. 
- tries=10 - until docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 - done -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh deleted file mode 100644 index c1fa2b20..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -set -e - -hostname=$1 -if [ "$hostname" = "" ]; then - hostname="localhost" -fi - -mkdir -p /etc/docker/certs.d/$hostname:5011 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5440 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5441 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5442 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5443 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt -cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert -cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5444 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5447 -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5448 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile deleted file mode 100644 index 04515e8c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM nginx:1.9 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf -COPY registry-noauth.conf /etc/nginx/registry-noauth.conf -COPY registry-basic.conf /etc/nginx/registry-basic.conf -COPY test.passwd /etc/nginx/test.passwd -COPY ssl /etc/nginx/ssl diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d776..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass 
http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf deleted file mode 100644 index 5b1a2d58..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf deleted file mode 100644 index 63cd180d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf +++ /dev/null @@ -1,27 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf deleted file mode 100644 index 3c629ae8..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf +++ /dev/null @@ -1,13 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - include docker-registry-v2.conf; -} -location / { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - include docker-registry.conf; -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf deleted file mode 100644 index 883a2d48..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf +++ /dev/null @@ -1,8 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - include docker-registry-v2.conf; -} -location / { - include docker-registry.conf; -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf 
b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf deleted file mode 100644 index b402eacb..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf +++ /dev/null @@ -1,277 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V1 Only) -server { - listen 5001; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V2 Only) -server { - listen 5002; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry-v2.conf; - } -} - -# TLS localhost (V1 Only) -server { - listen 5011; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - location / { - include docker-registry.conf; - } -} - -# TLS localregistry (V1 Only) -server { - listen 5011; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - location / { - include docker-registry.conf; - } -} - - - -# TLS Configuration chart -# Username/Password: testuser/passpassword -# | ca | client | basic | notes -# 5440 | yes | no | no | Tests CA certificate -# 5441 | yes | no | yes | Tests basic auth over TLS -# 5442 | yes | yes | no | Tests client auth with client CA -# 5443 | yes | yes | no | Tests client auth without client CA -# 5444 | yes | yes | yes | Tests using basic auth + tls auth -# 5445 | no | no | no | Tests insecure using TLS -# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS -# 5447 | no | yes | no | Tests client auth to insecure -# 5448 | yes | no | no | Bad SSL version - -server { - listen 5440; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key 
/etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - -# Add configuration for localregistry server_name -# Requires configuring /etc/hosts to use -# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing -# Docker secure/insecure registry features -server { - listen 5440; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key 
/etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd deleted file mode 100644 index 4e55de81..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd +++ /dev/null @@ -1 +0,0 @@ -testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh deleted file mode 100644 index c8831a09..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash -set -e -set -x - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -source helpers.bash - -# Root directory of Distribution -DISTRIBUTION_ROOT=$(cd ../..; pwd -P) - -volumeMount="" -if [ "$DOCKER_VOLUME" != "" ]; then - volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" -fi - -dockerMount="" -if [ "$DOCKER_BINARY" != "" ]; then - dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" -else - DOCKER_BINARY=docker -fi - -# Image containing the integration tests environment. -INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} - -if [ "$1" == "-d" ]; then - start_daemon - shift -fi - -TESTS=${@:-.} - -# Make sure we upgrade the integration environment. -docker pull $INTEGRATION_IMAGE - -# Start a Docker engine inside a docker container -ID=$(docker run -d -it --privileged $volumeMount $dockerMount \ - -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ - -e "DOCKER_GRAPHDRIVER=$DOCKER_GRAPHDRIVER" \ - ${INTEGRATION_IMAGE} \ - ./run_engine.sh) - -# Stop container on exit -trap "docker rm -f -v $ID" EXIT - - -# Wait for it to become reachable. -tries=10 -until docker exec "$ID" docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 -done - -# If no volume is specified, transfer images into the container from -# the outer docker instance -if [ "$DOCKER_VOLUME" == "" ]; then - # Make sure we have images outside the container, to transfer to the container. - # Not much will happen here if the images are already present. - docker-compose pull - docker-compose build - - # Transfer images to the inner container. - for image in "$INTEGRATION_IMAGE" registry:0.9.1 dockerintegration_nginx dockerintegration_registryv2; do - docker save "$image" | docker exec -i "$ID" docker load - done -fi - -# Run the tests. 
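# test_runner.sh (shown further below) builds and starts the compose
# registries inside the container, then invokes bats against the requested
# tests.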
-docker exec -it "$ID" sh -c "./test_runner.sh $TESTS" - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh deleted file mode 100644 index d14384cf..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -set -e -set -x - -DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-overlay} -EXEC_DRIVER=${EXEC_DRIVER:-native} - -# Set IP address in /etc/hosts for localregistry -IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') -echo "$IP localregistry" >> /etc/hosts - -sh install_certs.sh localregistry - -DOCKER_VERSION=$(docker --version | cut -d ' ' -f3 | cut -d ',' -f1) -major=$(echo "$DOCKER_VERSION"| cut -d '.' -f1) -minor=$(echo "$DOCKER_VERSION"| cut -d '.' -f2) - -daemonOpts="daemon" -if [ $major -le 1 ] && [ $minor -lt 9 ]; then - daemonOpts="--daemon" -fi - -docker $daemonOpts --log-level=debug --storage-driver="$DOCKER_GRAPHDRIVER" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh deleted file mode 100644 index 73a70c7e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -# Run the integration tests with multiple versions of the Docker engine - -set -e -set -x - -source helpers.bash - -if [ `uname` = "Linux" ]; then - tmpdir_template="$TMPDIR/docker-versions.XXXXX" -else - # /tmp isn't available for mounting in boot2docker - tmpdir_template="`pwd`/../../../docker-versions.XXXXX" -fi - -tmpdir=`mktemp -d "$tmpdir_template"` -trap "rm -rf $tmpdir" EXIT - -if [ "$1" == "-d" ]; then - start_daemon -fi - -# Released versions - -versions="1.6.1 1.7.1 1.8.3 1.9.1" - -for v in $versions; do - echo "Extracting Docker $v from dind image" - binpath="$tmpdir/docker-$v/docker" - ID=$(docker create dockerswarm/dind:$v) - docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" - - echo "Running tests with Docker $v" - DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" ./run.sh - - # Cleanup. - docker rm -f "$ID" -done - -# Latest experimental version - -echo "Extracting Docker master from dind image" -binpath="$tmpdir/docker-master/docker" -docker pull dockerswarm/dind-master -ID=$(docker create dockerswarm/dind-master) -docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" - -echo "Running tests with Docker master" -DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" ./run.sh - -# Cleanup. -docker rm -f "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh deleted file mode 100644 index 0a628238..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -TESTS=${@:-.} - -function execute() { - >&2 echo "++ $@" - eval "$@" -} - -execute time docker-compose build - -execute docker-compose up -d - -# Run the tests. 
-execute time bats -p $TESTS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats deleted file mode 100644 index 8b7ae287..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats +++ /dev/null @@ -1,102 +0,0 @@ -# Registry host name, should be set to non-localhost address and match -# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d -hostname="localregistry" - -image="hello-world:latest" - -# Login information, should match values in nginx/test.passwd -user="testuser" -password="passpassword" -email="distribution@docker.com" - -function setup() { - docker pull $image -} - -# skip basic auth tests with Docker 1.6, where they don't pass due to -# certificate issues -function basic_auth_version_check() { - run sh -c 'docker version | fgrep -q "Client version: 1.6."' - if [ "$status" -eq 0 ]; then - skip "Basic auth tests don't support 1.6.x" - fi -} - -# has_digest enforces the last output line is "Digest: sha256:..." -# the input is the name of the array containing the output lines -function has_digest() { - filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') - [ "$filtered" != "" ] -} - -function login() { - run docker login -u $user -p $password -e $email $1 - [ "$status" -eq 0 ] - # First line is WARNING about credential save - [ "${lines[1]}" = "Login Succeeded" ] -} - -@test "Test valid certificates" { - docker tag -f $image $hostname:5440/$image - run docker push $hostname:5440/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test basic auth" { - basic_auth_version_check - login $hostname:5441 - docker tag -f $image $hostname:5441/$image - run docker push $hostname:5441/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client auth" { - docker tag -f $image $hostname:5442/$image - run docker push $hostname:5442/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client with invalid certificate authority fails" { - docker tag -f $image $hostname:5443/$image - run docker push $hostname:5443/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with TLS client auth" { - basic_auth_version_check - login $hostname:5444 - docker tag -f $image $hostname:5444/$image - run docker push $hostname:5444/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test unknown certificate authority fails" { - docker tag -f $image $hostname:5445/$image - run docker push $hostname:5445/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with unknown certificate authority fails" { - run login $hostname:5446 - [ "$status" -ne 0 ] - docker tag -f $image $hostname:5446/$image - run docker push $hostname:5446/$image - [ "$status" -ne 0 ] -} - -@test "Test TLS client auth to server with unknown certificate authority fails" { - docker tag -f $image $hostname:5447/$image - run docker push $hostname:5447/$image - [ "$status" -ne 0 ] -} - -@test "Test failure to connect to server fails to fallback to SSLv3" { - docker tag -f $image $hostname:5448/$image - run docker push $hostname:5448/$image - [ "$status" -ne 0 ] -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile deleted file mode 100644 index 89ee3d29..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile +++ /dev/null @@ 
-1,17 +0,0 @@ -FROM docs/base:latest -MAINTAINER Mary Anthony (@moxiegirl) - -RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine -RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose -RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine -RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry -RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic -RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials -RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource - -ENV PROJECT=registry - -# To get the git info for this repo -COPY . /src - -COPY . /docs/content/$PROJECT/ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile deleted file mode 100644 index 021e8f6e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate - -# env vars passed through directly to Docker's build scripts -# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e DOCKER_CLIENTONLY \ - -e DOCKER_EXECDRIVER \ - -e DOCKER_GRAPHDRIVER \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds - -# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) -DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) - -# to allow `make DOCSPORT=9000 docs` -DOCSPORT := 8000 - -# Get the IP ADDRESS -DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") -HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") -HUGO_BIND_IP=0.0.0.0 - -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) - - -DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE - -# for some docs workarounds (see below in "docs-build" target) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) - -default: docs - -docs: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - -docs-draft: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - - -docs-shell: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash - - -docs-build: -# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files -# echo "$(GIT_BRANCH)" > GIT_BRANCH -# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET -# echo "$(GITCOMMIT)" > GITCOMMIT - docker build -t 
"$(DOCKER_DOCS_IMAGE)" . diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md deleted file mode 100644 index c3d3afd4..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md +++ /dev/null @@ -1,213 +0,0 @@ - - -# Authenticating proxy with apache - -## Use-case - -People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. - -Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. - -### Alternatives - -If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). - -### Solution - -With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. - -While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the exemple. - -We also implement push restriction (to a limited user group) for the sake of the exemple. Again, you should modify this to fit your mileage. - -### Gotchas - -While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. - -Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. - -## Setting things up - -Read again [the requirements](recipes.md#requirements). - -Ready? 
- -Run the following script: - -``` -mkdir -p auth -mkdir -p data - -# This is the main apache configuration you will use -cat <<EOF > auth/httpd.conf -LoadModule headers_module modules/mod_headers.so - -LoadModule authn_file_module modules/mod_authn_file.so -LoadModule authn_core_module modules/mod_authn_core.so -LoadModule authz_groupfile_module modules/mod_authz_groupfile.so -LoadModule authz_user_module modules/mod_authz_user.so -LoadModule authz_core_module modules/mod_authz_core.so -LoadModule auth_basic_module modules/mod_auth_basic.so -LoadModule access_compat_module modules/mod_access_compat.so - -LoadModule log_config_module modules/mod_log_config.so - -LoadModule ssl_module modules/mod_ssl.so - -LoadModule proxy_module modules/mod_proxy.so -LoadModule proxy_http_module modules/mod_proxy_http.so - -LoadModule unixd_module modules/mod_unixd.so - - - SSLRandomSeed startup builtin - SSLRandomSeed connect builtin - - - - User daemon - Group daemon - - -ServerAdmin you@example.com - -ErrorLog /proc/self/fd/2 - -LogLevel warn - - - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined - LogFormat "%h %l %u %t \"%r\" %>s %b" common - - - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio - - - CustomLog /proc/self/fd/1 common - - -ServerRoot "/usr/local/apache2" - -Listen 5043 - - - AllowOverride none - Require all denied - - - - - ServerName myregistrydomain.com - - SSLEngine on - SSLCertificateFile /usr/local/apache2/conf/domain.crt - SSLCertificateKeyFile /usr/local/apache2/conf/domain.key - - ## SSL settings recommendation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html - # Anti CRIME - SSLCompression off - - # POODLE and other stuff - SSLProtocol all -SSLv2 -SSLv3 -TLSv1 - - # Secure cipher suites - SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH - SSLHonorCipherOrder on - - Header always set "Docker-Distribution-Api-Version" "registry/2.0" - Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" - RequestHeader set X-Forwarded-Proto "https" - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd error messages) - ProxyPass /error/ !
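    # Relay the Registry v2 API to the backend registry container. ProxyPass
    # forwards requests; ProxyPassReverse rewrites Location headers in the
    # responses so redirects keep pointing at this proxy.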
- - ProxyPass /v2 http://registry:5000/v2 - ProxyPassReverse /v2 http://registry:5000/v2 - - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd" - AuthGroupFile "/usr/local/apache2/conf/httpd.groups" - - # Read access to authenticated users - - Require valid-user - - - # Write access to docker-deployer only - - Require group pusher - - - - - -EOF - -# Now, create a password file for "testuser" and "testpassword" -docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd -# Create another one for "testuserpush" and "testpasswordpush" -docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd - -# Create your group file -echo "pusher: testuserpush" > auth/httpd.groups - -# Copy over your certificate files -cp domain.crt auth -cp domain.key auth - -# Now create your compose file - -cat <<EOF > docker-compose.yml -apache: - image: "httpd:2.4" - hostname: myregistrydomain.com - ports: - - 5043:5043 - links: - - registry:registry - volumes: - - `pwd`/auth:/usr/local/apache2/conf - -registry: - image: registry:2 - ports: - - 127.0.0.1:5000:5000 - volumes: - - `pwd`/data:/var/lib/registry - -EOF -``` - -## Starting and stopping - -Now, start your stack: - - docker-compose up -d - -Log in with a "push"-authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image: - - docker login myregistrydomain.com:5043 - docker tag ubuntu myregistrydomain.com:5043/test - docker push myregistrydomain.com:5043/test - -Now, log in with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image: - - docker login myregistrydomain.com:5043 - docker pull myregistrydomain.com:5043/test - -Verify that the "pull-only" user can NOT push: - - docker push myregistrydomain.com:5043/test diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md deleted file mode 100644 index 39251760..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md +++ /dev/null @@ -1,54 +0,0 @@ - - -# Architecture - -## Design -**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. - -### Eventual Consistency - -> **NOTE:** This section belongs somewhere, perhaps in a design document. We -> are leaving this here so the information is not lost. - -Running the registry on eventually consistent backends has been part of the -design from the beginning. This section covers some of the approaches to -dealing with this reality. - -There are a few classes of issues that we need to worry about when -implementing something on top of the storage drivers: - -1. Read-After-Write consistency (see this [article on - s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). -2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). - -In reality, the registry must worry about these kinds of errors when doing the -following: - -1. Data accepted into a temporary upload file may not yet contain the latest - data block (read-after-write). -2. Moving uploaded data into its blob location (write-write race). -3. Modifying the "current" manifest for a given tag (write-write race). -4. A whole slew of operations around deletes (read-after-write, delete-write - races, garbage collection, etc.).
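To make the write-write case concrete, the sketch below shows the digest-addressed, write-then-rename pattern that the techniques listed next build on. This is not the registry's actual storage-driver code; the layout and names are illustrative. Because the destination path is derived from the content's SHA-256, concurrent writers of identical bytes converge on the same immutable file, so the race is benign:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"path/filepath"
)

// putBlob stages data in a private temporary file, then renames it into a
// location derived from the content digest. Two racing writers of the same
// bytes target the same path with identical contents, so whichever rename
// lands last changes nothing. The layout is illustrative only.
func putBlob(root string, data []byte) (string, error) {
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data))
	dir := filepath.Join(root, "blobs", digest)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", err
	}
	tmp, err := os.CreateTemp(dir, "upload-")
	if err != nil {
		return "", err
	}
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return "", err
	}
	if err := tmp.Close(); err != nil {
		return "", err
	}
	// Rename is atomic within the same filesystem, so readers never observe
	// a partially written blob.
	return digest, os.Rename(tmp.Name(), filepath.Join(dir, "data"))
}

func main() {
	d, err := putBlob(os.TempDir(), []byte("layer bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println("stored", d)
}
```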
- -The backend path layout employs a few techniques to avoid these problems: - -1. Large writes are done to private upload directories. This alleviates most - of the corruption potential under multiple writers by avoiding multiple - writers. -2. Constraints in storage driver implementations, such as support for writing - after the end of a file to extend it. -3. Digest verification to avoid data corruption. -4. Manifest files are stored by digest and cannot change. -5. All other non-content files (links, hashes, etc.) are written as an atomic - unit. Anything that requires additions and deletions is broken out into - separate "files". Last writer still wins. - -Unfortunately, one must play this game when trying to build something like -this on top of eventually consistent storage systems. If we run into serious -problems, we can wrap the storagedrivers in a shared consistency layer but -that would increase complexity and hinder registry cluster performance. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md deleted file mode 100644 index 5672210b..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md +++ /dev/null @@ -1,158 +0,0 @@ - - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. 
- -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ GOPATH=`godep path`:$GOPATH make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Developing - -The above approaches are helpful for small experimentation. If more complex -tasks are at hand, it is recommended to employ the full power of `godep`. - -The Makefile is designed to have its `GOPATH` defined externally. This allows -one to experiment with various development environment setups. This is -primarily useful when testing upstream bugfixes, by modifying local code. This -can be demonstrated using `godep` to migrate the `GOPATH` to use the specified -dependencies. The `GOPATH` can be migrated to the current package versions -declared in `Godeps` with the following command: - - godep restore - -> **WARNING:** This command will checkout versions of the code specified in -> Godeps/Godeps.json, modifying the contents of `GOPATH`. 
If this is -> undesired, it is recommended to create a workspace devoted to work on the -> _Distribution_ project. - -With a successful run of the above command, one can now use `make` without -specifying the `GOPATH`: - - make - -If that is successful, standard `go` commands, such as `go test` should work, -per package, without issue. - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. - -To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) -(librados-dev and librbd-dev will be required to build the bindings): - - export DOCKER_BUILDTAGS='include_rados' diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md deleted file mode 100644 index 1c1c99cc..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md +++ /dev/null @@ -1,1814 +0,0 @@ - - -# Registry Configuration Reference - -The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. - -## Override specific configuration options - -In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. - -To override a configuration option, create an environment variable named -`REGISTRY_variable` where *`variable`* is the name of the configuration option -and the `_` (underscore) represents indention levels. For example, you can -configure the `rootdirectory` of the `filesystem` storage backend: - - storage: - filesystem: - rootdirectory: /var/lib/registry - -To override this value, set an environment variable like this: - - REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere - -This variable overrides the `/var/lib/registry` value to the `/somewhere` -directory. - ->**NOTE**: It is highly recommended to create a base configuration file with which environment variables can be used to tweak individual values. Overriding configuration sections with environment variables is not recommended. - -## Overriding the entire configuration file - -If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. - -Typically, create a new configuration file from scratch, and call it `config.yml`, then: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/config.yml:/etc/docker/registry/config.yml \ - registry:2 - -You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). - -## List of configuration options - -This section lists all the registry configuration options. Some options in -the list are mutually exclusive. So, make sure to read the detailed reference -information about each option that appears later in this page. 
- - version: 0.1 - log: - level: debug - formatter: text - fields: - service: registry - environment: staging - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com - loglevel: debug # deprecated: use "log" - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - oss: - accesskeyid: accesskeyid - accesskeysecret: accesskeysecret - region: OSS region name - endpoint: optional endpoints - internal: optional internal endpoint - bucket: OSS bucket - encrypt: optional data encryption setting - secure: optional ssl setting - chunksize: optional size valye - rootdirectory: optional root directory - inmemory: # This driver takes no parameters - delete: - enabled: false - redirect: - disable: false - cache: - blobdescriptor: redis - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - readonly: - enabled: false - auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd - middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 - reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true - http: - addr: localhost:5000 - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 - redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: 
http://server.to.check/must/return/200 - headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -In some instances a configuration option is **optional** but it contains child -options marked as **required**. This indicates that you can omit the parent with -all its children. However, if the parent is included, you must also include all -the children marked **required**. - -## version - - version: 0.1 - -The `version` option is **required**. It specifies the configuration's version. -It is expected to remain a top-level field, to allow for a consistent version -check before parsing the remainder of the configuration file. - -## log - -The `log` subsection configures the behavior of the logging system. The logging -system outputs everything to stdout. You can adjust the granularity and format -with this configuration section. - - log: - level: debug - formatter: text - fields: - service: registry - environment: staging - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
| `formatter` | no | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` or `logstash`. The default is `text`. |
| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they are mixed with output from other systems. |
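For example, to switch to machine-readable output while keeping structured context fields, a configuration along these lines should work (values taken from the table above; the `service` field is illustrative):

    log:
      level: info
      formatter: json
      fields:
        service: registry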
- -## hooks - - hooks: - - type: mail - levels: - - panic - options: - smtp: - addr: smtp.sendhost.com:25 - username: sendername - password: password - insecure: true - from: name@sendhost.com - to: - - name@receivehost.com - -The `hooks` subsection configures the logging hooks' behavior. This subsection -includes a sequence handler which you can use for sending mail, for example. -Refer to `loglevel` to configure the level of messages printed. - -## loglevel - -> **DEPRECATED:** Please use [log](#log) instead. - - loglevel: debug - -Permitted values are `error`, `warn`, `info` and `debug`. The default is -`info`. - -## storage - - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - oss: - accesskeyid: accesskeyid - accesskeysecret: accesskeysecret - region: OSS region name - endpoint: optional endpoints - internal: optional internal endpoint - bucket: OSS bucket - encrypt: optional data encryption setting - secure: optional ssl setting - chunksize: optional size valye - rootdirectory: optional root directory - inmemory: - delete: - enabled: false - cache: - blobdescriptor: inmemory - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - redirect: - disable: false - -The storage option is **required** and defines which storage backend is in use. -You must configure one backend; if you configure more, the registry returns an error. You can choose any of these backend storage drivers: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Storage driver | Description |
|----------------|-------------|
| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the driver's reference documentation. |
| `azure` | Uses Microsoft Azure Blob Storage. See the driver's reference documentation. |
| `gcs` | Uses Google Cloud Storage. See the driver's reference documentation. |
| `rados` | Uses Ceph Object Storage. See the driver's reference documentation. |
| `s3` | Uses Amazon Simple Storage Service (S3). See the driver's reference documentation. |
| `swift` | Uses OpenStack Swift object storage. See the driver's reference documentation. |
| `oss` | Uses Aliyun OSS for object storage. See the driver's reference documentation. |
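As a minimal sketch, a registry backed by the local filesystem with an in-memory blob-descriptor cache could be configured like this (the root directory is illustrative):

    storage:
      filesystem:
        rootdirectory: /var/lib/registry
      cache:
        blobdescriptor: inmemory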
For pure testing purposes, you can use the [`inmemory` storage driver](storage-drivers/inmemory.md). If you would like to run a registry from volatile memory, use the [`filesystem` driver](storage-drivers/filesystem.md) on a ramdisk.

If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use an S3 or Azure backing data store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message:

    mkdir /XXX protocol error

and your registry will not function properly.

### Maintenance

Currently, upload purging and read-only mode are the only maintenance functions available. These and future storage-related maintenance functions can be configured under the `maintenance` section.

### Upload Purging

Upload purging is a background process that periodically removes orphaned files from the upload directories of the registry. Upload purging is enabled by default. To configure upload directory purging, the following parameters must be set.

| Parameter | Required | Description |
|-----------|----------|-------------|
| `enabled` | yes | Set to true to enable upload purging. Default=true. |
| `age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week). |
| `interval` | yes | The interval between upload directory purging. Default=24h. |
| `dryrun` | yes | Set to true to obtain a summary of which directories would be deleted. Default=false. |

Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix, e.g. 45m, 2h10m, 168h (1 week).

### Read-only mode

If the `readonly` section under `maintenance` has `enabled` set to `true`, clients will not be allowed to write to the registry. This mode is useful to temporarily prevent writes to the backend storage so a garbage collection pass can be run. Before running garbage collection, the registry should be restarted with readonly's `enabled` set to true. After the garbage collection pass finishes, the registry may be restarted again, this time with `readonly` removed from the configuration (or set to false).

### delete

Use the `delete` subsection to enable the deletion of image blobs and manifests by digest. It defaults to false, but it can be enabled by adding the following to the configuration file:

    delete:
      enabled: true

### cache

Use the `cache` subsection to enable caching of data accessed in the storage backend. Currently, the only available cache provides fast access to layer metadata. This, if configured, uses the `blobdescriptor` field.

You can set the `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses a Redis pool to cache layer metadata. The `inmemory` value uses an in-memory map.

>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these
>are equivalent, `layerinfo` has been deprecated in favor of
>`blobdescriptor`.

### redirect

The `redirect` subsection provides configuration for managing redirects from content backends. For backends that support it, redirecting is enabled by default. Certain deployment scenarios may prefer to route all data through the Registry, rather than redirecting to the backend.
This may be more efficient -when using a backend that is not co-located or when a registry instance is -doing aggressive caching. - -Redirects can be disabled by adding a single flag `disable`, set to `true` -under the `redirect` section: - - redirect: - disable: true - - -## auth - - auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd - -The `auth` option is **optional**. There are -currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only -one `auth` provider. - -### silly - -The `silly` auth is only for development purposes. It simply checks for the -existence of the `Authorization` header in the HTTP request. It has no regard for -the header's value. If the header does not exist, the `silly` auth responds with a -challenge response, echoing back the realm, service, and scope that access was -denied for. - -The following values are used to configure the response: - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
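Because only the presence of the header is checked, the behavior can be sketched as follows (host and header value are arbitrary):

    # Denied: no Authorization header; the challenge echoes realm, service and scope
    curl -i http://localhost:5000/v2/
    # Accepted: the header merely needs to exist
    curl -i -H "Authorization: anything" http://localhost:5000/v2/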
- - - -### token - -Token based authentication allows the authentication system to be decoupled from -the registry. It is a well established authentication paradigm with a high -degree of security. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token, so it must match the value configured for the issuer. |
| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
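For a quick sanity check of a token-configured registry, an unauthenticated request against the API root should come back with a `Bearer` challenge naming your realm and service; the hostname and values below are illustrative:

    # Expect a 401 with a WWW-Authenticate header such as:
    #   Bearer realm="https://token-realm/token",service="token-service"
    curl -i https://myregistrydomain.com:5000/v2/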
For more information about Token based authentication configuration, see the [specification](spec/auth/token.md).

### htpasswd

The _htpasswd_ authentication backend allows one to configure basic auth using an [Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported. Entries with other hash types will be ignored. The htpasswd file is loaded once, at startup. If the file is invalid, the registry will display an error and will not start.

> __WARNING:__ This authentication scheme should only be used with TLS
> configured, since basic authentication sends passwords as part of the HTTP
> header.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `path` | yes | Path to the htpasswd file to load at startup. |
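A bcrypt-format file can be produced with the `htpasswd` tool bundled in the official registry image, as sketched below (user and password are placeholders):

    mkdir -p auth
    docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd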
- -## middleware - -The `middleware` option is **optional**. Use this option to inject middleware at -named hook points. All middleware must implement the same interface as the -object they're wrapping. This means a registry middleware must implement the -`distribution.Namespace` interface, repository middleware must implement -`distribution.Repository`, and storage middleware must implement -`driver.StorageDriver`. - -Currently only one middleware, `cloudfront`, a storage middleware, is supported -in the registry implementation. - - middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 - -Each middleware entry has `name` and `options` entries. The `name` must -correspond to the name under which the middleware registers itself. The -`options` field is a map that details custom configuration required to -initialize the middleware. It is treated as a `map[string]interface{}`. As such, -it supports any interesting structures desired, leaving it up to the middleware -initialization function to best determine how to handle the specific -interpretation of the options. - -### cloudfront - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes | Private key for Cloudfront provided by AWS. |
| `keypairid` | yes | Key pair ID provided by AWS. |
| `duration` | no | Duration for which a signed URL should be valid. |
## reporting

    reporting:
      bugsnag:
        apikey: bugsnagapikey
        releasestage: bugsnagreleasestage
        endpoint: bugsnagendpoint
      newrelic:
        licensekey: newreliclicensekey
        name: newrelicname
        verbose: true

The `reporting` option is **optional** and configures error and metrics reporting tools. At the moment only two services are supported, [New Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid configuration may contain both.

### bugsnag
| Parameter | Required | Description |
|-----------|----------|-------------|
| `apikey` | yes | API key provided by Bugsnag. |
| `releasestage` | no | Tracks where the registry is deployed, for example `production`, `staging`, or `development`. |
| `endpoint` | no | Specify the enterprise Bugsnag endpoint. |
### newrelic
| Parameter | Required | Description |
|-----------|----------|-------------|
| `licensekey` | yes | License key provided by New Relic. |
| `name` | no | New Relic application name. |
| `verbose` | no | Enable New Relic debugging output on stdout. |
- -## http - - http: - addr: localhost:5000 - net: tcp - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - -The `http` option details the configuration for the HTTP server that hosts the registry. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | The address for which the server should accept connections. The form depends on the network type (see the `net` option): `HOST:PORT` for tcp and `FILE` for a unix socket. |
| `net` | no | The network used to create the listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
| `prefix` | no | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `host` | no | This parameter specifies an externally-reachable address for the registry, as a fully qualified URL. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. |
| `secret` | yes | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. This parameter may be omitted, in which case the registry will automatically generate a secret at launch. **WARNING:** If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries. |

### tls

The `tls` struct within `http` is **optional**. Use this to configure TLS for the server. If you already have a server such as Nginx or Apache running on the same host as the registry, you may prefer to configure TLS termination there and proxy connections to the registry server.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `certificate` | yes | Absolute path to the x509 certificate file. |
| `key` | yes | Absolute path to the x509 private key file. |
| `clientcas` | no | An array of absolute paths to x509 CA files. |
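When running from the official image, the same certificate and key can be supplied through the environment-variable mapping described earlier; a sketch (the paths are illustrative):

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2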
### debug

The `debug` option is **optional**. Use it to configure a debug server that can be helpful in diagnosing problems. The debug endpoint can be used for monitoring registry metrics and health, as well as profiling. Sensitive information may be available via the debug endpoint. Please be certain that access to the debug endpoint is locked down in a production environment.

The `debug` section takes a single, required `addr` parameter. This parameter specifies the `HOST:PORT` on which the debug server should accept connections.

### headers

The `headers` option is **optional**. Use it to specify headers that the HTTP server should include in responses. This can be used for security headers such as `Strict-Transport-Security`.

The `headers` option should contain an option for each header to include, where the parameter name is the header's name, and the parameter value a list of the header's payload values.

Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers will not interpret content as HTML if they are directed to load a page from the registry. This header is included in the example configuration files.

## notifications

    notifications:
      endpoints:
        - name: alistener
          disabled: false
          url: https://my.listener.com/event
          headers:
          timeout: 500
          threshold: 5
          backoff: 1000

The notifications option is **optional** and currently may contain a single option, `endpoints`.

### endpoints

Endpoints is a list of named services (URLs) that can accept event notifications.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `name` | yes | A human readable name for the service. |
| `disabled` | no | A boolean to enable/disable notifications for a service. |
| `url` | yes | The URL to which events should be published. |
| `headers` | yes | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `timeout` | yes | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `threshold` | yes | An integer specifying how long to wait before backing off a failure. |
| `backoff` | yes | How long the system backs off before retrying. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
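Note the nanosecond default: a bare `timeout: 500` is read as 500 nanoseconds. If the intent is half a second, spelling out the unit avoids surprises; a sketch (endpoint values are illustrative):

    notifications:
      endpoints:
        - name: alistener
          url: https://my.listener.com/event
          timeout: 500ms
          threshold: 5
          backoff: 1s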
- - -## redis - - redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - -Declare parameters for constructing the redis connections. Registry instances -may use the Redis instance for several applications. The current purpose is -caching information about immutable blobs. Most of the options below control -how the registry connects to redis. You can control the pool's behavior -with the [pool](#pool) subsection. - -It's advisable to configure Redis itself with the **allkeys-lru** eviction policy -as the registry does not set an expire value on keys. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | Address (host and port) of the redis instance. |
| `password` | no | A password used to authenticate to the redis instance. |
| `db` | no | Selects the db for each connection. |
| `dialtimeout` | no | Timeout for connecting to a redis instance. |
| `readtimeout` | no | Timeout for reading from redis connections. |
| `writetimeout` | no | Timeout for writing to redis connections. |
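On the Redis side, the eviction policy recommended above is set in `redis.conf`; a sketch (the memory limit is illustrative):

    maxmemory 1gb
    maxmemory-policy allkeys-lru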
- - -### pool - - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - -Configure the behavior of the Redis connection pool. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `maxidle` | no | Sets the maximum number of idle connections. |
| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |
- -## health - - health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: http://server.to.check/must/return/200 - headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 - -The health option is **optional**. It may contain preferences for a periodic -health check on the storage driver's backend storage, and optional periodic -checks on local files, HTTP URIs, and/or TCP servers. The results of the health -checks are available at /debug/health on the debug HTTP server if the debug -HTTP server is enabled (see http section). - -### storagedriver - -storagedriver contains options for a health check on the configured storage -driver's backend storage. enabled must be set to true for this health check to -be active. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `enabled` | yes | "true" to enable the storage driver health check or "false" to disable it. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -### file - -file is a list of paths to be periodically checked for the existence of a file. -If a file exists at the given path, the health check will fail. This can be -used as a way of bringing a registry out of rotation by creating a file. - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `file` | yes | The path to check for the existence of a file. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
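In practice, taking an instance out of rotation is then a one-liner against the configured path (the path below mirrors the example configuration):

    # Mark this registry unhealthy...
    touch /path/to/checked/file
    # ...and bring it back into rotation
    rm /path/to/checked/file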
- -### http - -http is a list of HTTP URIs to be periodically checked with HEAD requests. If -a HEAD request doesn't complete or returns an unexpected status code, the -health check will fail. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `uri` | yes | The URI to check. |
| `headers` | no | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `statuscode` | no | Expected status code from the HTTP URI. Defaults to 200. |
| `timeout` | no | The length of time to wait before timing out the HTTP request. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -### tcp - -tcp is a list of TCP addresses to be periodically checked with connection -attempts. The addresses must include port numbers. If a connection attempt -fails, the health check will fail. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | The TCP address to connect to, including a port number. |
| `timeout` | no | The length of time to wait before timing out the TCP connection. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -## Proxy - - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `remoteurl` | yes | The URL of the official Docker Hub. |
| `username` | no | The username of the Docker Hub account. |
| `password` | no | The password for the official Docker Hub account. |
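Using the environment-variable mapping described earlier, a pull-through cache could be started as sketched here (Hub credentials omitted):

    docker run -d -p 5000:5000 --restart=always --name registry-mirror \
      -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
      registry:2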
- -To enable pulling private repositories (e.g. `batman/robin`) a username and password for user `batman` must be specified. Note: These private repositories will be stored in the proxy cache's storage and relevant measures should be taken to protect access to this. - - -## Example: Development configuration - -The following is a simple example you can use for local development: - - version: 0.1 - log: - level: debug - storage: - filesystem: - rootdirectory: /var/lib/registry - http: - addr: localhost:5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - -The above configures the registry instance to run on port `5000`, binding to -`localhost`, with the `debug` server enabled. Registry data storage is in the -`/var/lib/registry` directory. Logging is in `debug` mode, which is the most -verbose. - -A similar simple configuration is available at -[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). -Both are generally useful for local development. - - -## Example: Middleware configuration - -This example illustrates how to configure storage middleware in a registry. -Middleware allows the registry to serve layers via a content delivery network -(CDN). This is useful for reducing requests to the storage layer. - -Currently, the registry supports [Amazon -Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in -conjunction with the S3 storage driver. - - - - - - - - - - - - - - - - - - -
| Parameter | Description |
|-----------|-------------|
| `name` | The storage middleware name. Currently `cloudfront` is an accepted value. |
| `disabled` | Set to `false` to easily disable the middleware. |
| `options` | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair), and `duration` (the duration in minutes for which the URL is valid; default is 20). |
- -The following example illustrates these values: - - middleware: - storage: - - name: cloudfront - disabled: false - options: - baseurl: http://d111111abcdef8.cloudfront.net - privatekey: /path/to/asecret.pem - keypairid: asecret - duration: 60 - - ->**Note**: Cloudfront keys exist separately to other AWS keys. See ->[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) ->for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md deleted file mode 100644 index 77e8f05c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md +++ /dev/null @@ -1,231 +0,0 @@ - - -# Deploying a registry server - -You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/). - -## Running on localhost - -Start your registry: - - docker run -d -p 5000:5000 --restart=always --name registry registry:2 - -You can now use it with docker. - -Get any image from the hub and tag it to point to your registry: - - docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu - -... then push it to your registry: - - docker push localhost:5000/ubuntu - -... then pull it back from your registry: - - docker pull localhost:5000/ubuntu - -To stop your registry, you would: - - docker stop registry && docker rm -v registry - -## Storage - -By default, your registry data is persisted as a [docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage. - -Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/data:/var/lib/registry \ - registry:2 - -### Alternatives - -You should usually consider using [another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md) instead of the local filesystem. Use the [storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) to configure an alternate storage backend. - -Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features. - -## Running a domain registry - -While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL. - -### Get a certificate - -Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA. - -Create a `certs` directory: - - mkdir -p certs - -Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`. 
- -Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/certs:/certs \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ - registry:2 - -You should now be able to access your registry from another docker host: - - docker pull ubuntu - docker tag ubuntu myregistrydomain.com:5000/ubuntu - docker push myregistrydomain.com:5000/ubuntu - docker pull myregistrydomain.com:5000/ubuntu - -#### Gotcha - -A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command: - - cat domain.crt intermediate-certificates.pem > certs/domain.crt - -### Alternatives - -While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md). - -## Load Balancing Considerations - -One may want to use a load balancer to distribute load, terminate TLS or -provide high availability. While a full load balancing setup is outside the -scope of this document, there are a few considerations that can make the process -smoother. - -The most important aspect is that a load balanced cluster of registries must -share the same resources. For the current version of the registry, this means -the following must be the same: - - - Storage Driver - - HTTP Secret - - Redis Cache (if configured) - -If any of these are different, the registry will have trouble serving requests. -As an example, if you're using the filesystem driver, all registry instances -must have access to the same filesystem root, which means they should be in -the same machine. For other drivers, such as s3 or azure, they should be -accessing the same resource, and will likely share an identical configuration. -The _HTTP Secret_ coordinates uploads, so also must be the same across -instances. Configuring different redis instances will work (at the time -of writing), but will not be optimal if the instances are not shared, causing -more requests to be directed to the backend. - -Getting the headers correct is very important. For all responses to any -request under the "/v2/" url space, the `Docker-Distribution-API-Version` -header should be set to the value "registry/2.0", even for a 4xx response. -This header allows the docker engine to quickly resolve authentication realms -and fallback to version 1 registries, if necessary. Confirming this is setup -correctly can help avoid problems with fallback. - -In the same train of thought, you must make sure you are properly sending the -`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side" -values. Failure to do so usually makes the registry issue redirects to internal -hostnames or downgrading from https to http. - -A properly secured registry should return 401 when the "/v2/" endpoint is hit -without credentials. The response should include a `WWW-Authenticate` -challenge, providing guidance on how to authenticate, such as with basic auth -or a token service. If the load balancer has health checks, it is recommended -to configure it to consider a 401 response as healthy and any other as down. -This will secure your registry by ensuring that configuration problems with -authentication don't accidentally expose an unprotected registry. 
If you're -using a less sophisticated load balancer, such as Amazon's Elastic Load -Balancer, that doesn't allow one to change the healthy response code, health -checks can be directed at "/", which will always return a `200 OK` response. - -## Restricting access - -Except for registries running on secure local networks, registries should always implement access restrictions. - -### Native basic auth - -The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism). - -> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work. - -First create a password file with one entry for the user "testuser", with password "testpassword": - - mkdir auth - docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd - -Make sure you stopped your registry from the previous step, then start it again: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/auth:/auth \ - -e "REGISTRY_AUTH=htpasswd" \ - -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ - -v `pwd`/certs:/certs \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ - registry:2 - -You should now be able to: - - docker login myregistrydomain.com:5000 - -And then push and pull images as an authenticated user. - -#### Gotcha - -Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md). - -### Alternatives - -1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes.md). - -2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. - -You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth). - -Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation. - -## Managing with Compose - -As your registry configuration grows more complex, dealing with it can quickly become tedious. - -It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate operating your registry. - -Here is a simple `docker-compose.yml` example that condenses everything explained so far: - -``` -registry: - restart: always - image: registry:2 - ports: - - 5000:5000 - environment: - REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt - REGISTRY_HTTP_TLS_KEY: /certs/domain.key - REGISTRY_AUTH: htpasswd - REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd - REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm - volumes: - - /path/data:/var/lib/registry - - /path/certs:/certs - - /path/auth:/auth -``` - -> **Warning**: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above. 
You can then start your registry with a simple

    docker-compose up -d

## Next

You will find more specific and advanced information in the following sections:

 - [Configuration reference](configuration.md)
 - [Working with notifications](notifications.md)
 - [Advanced "recipes"](recipes.md)
 - [Registry API](spec/api.md)
 - [Storage driver model](storagedrivers.md)
 - [Token authentication](spec/auth/token.md)

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md deleted file mode 100644 index 8159b520..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md +++ /dev/null @@ -1,70 +0,0 @@

# Glossary

This page contains definitions for distribution related terms.
**Blob**

A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest"). Layers are a good example of "blobs".

**Image**

An image is a named set of immutable data from which a Docker container can be created. An image is represented by a json file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.

**Layer**

A layer is a tar archive bundling partial content from a filesystem. Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.

**Manifest**

A manifest is the JSON representation of an image.

**Namespace**

A namespace is a collection of repositories with a common name prefix. The namespace with an empty prefix is considered the Global Namespace.

**Registry**

A registry is a service that lets you store and deliver images.

**Repository**

A repository is a set of data containing all versions of a given image.

**Scope**

A scope is the portion of a namespace onto which a given authorization token is granted.

**Tag**

A tag is conceptually a "version" of a named image. Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md deleted file mode 100644 index c6ac7ad9..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md +++ /dev/null @@ -1,24 +0,0 @@ - - -# Getting help - -If you need help, or just want to chat, you can reach us: - -- on irc: `#docker-distribution` on freenode -- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) - -If you want to report a bug: - -- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) -- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) - -You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/opensource/get-help/). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy deleted file mode 100644 index 5ecf4c3a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rectangle","order":
2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png deleted file mode 100644 index 09de8d23..00000000 Binary files a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg deleted file mode 100644 index 6c3d680b..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg +++ /dev/null @@ -1 +0,0 @@ -Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md deleted file mode 100644 index 6cd6769e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md +++ /dev/null @@ -1,66 +0,0 @@ - - -# Docker Registry - -## What it is - -The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. -The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). 
- -## Why use it - -You should use the Registry if you want to: - - * tightly control where your images are being stored - * fully own your image distribution pipeline - * integrate image storage and distribution tightly into your in-house development workflow - -## Alternatives - -Users looking for a zero-maintenance, ready-to-go solution are encouraged to head over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). - -Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/). - -## Requirements - -The Registry is compatible with Docker engine **version 1.6.0 or higher**. -If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry). - -## TL;DR - -Start your registry - - docker run -d -p 5000:5000 --name registry registry:2 - -Pull (or build) some image from the hub - - docker pull ubuntu - -Tag the image so that it points to your registry - - docker tag ubuntu localhost:5000/myfirstimage - -Push it - - docker push localhost:5000/myfirstimage - -Pull it back - - docker pull localhost:5000/myfirstimage - -Now stop your registry and remove all data - - docker stop registry && docker rm -v registry - -## Next - -You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md). - \ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md deleted file mode 100644 index 9ccb5419..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md +++ /dev/null @@ -1,90 +0,0 @@ - - -# Insecure Registry - -While it's highly recommended to secure your registry using a TLS certificate issued by a known CA, you may alternatively decide to use self-signed certificates, or even use your registry over plain HTTP. - -You have to understand the downsides of doing so, and the extra configuration burden it entails. - -## Deploying a plain HTTP registry - -> **Warning**: it's not possible to use an insecure registry with basic authentication - -The following configuration tells Docker to entirely disregard security for your registry: - -1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to your existing `DOCKER_OPTS`) -2. restart your Docker daemon: on Ubuntu, this is usually `service docker stop && service docker start` - -**Pros:** - - - relatively easy to configure - -**Cons:** - - - this is **very** insecure: you are basically exposing yourself to trivial MITM attacks, and this solution should only be used for isolated testing or in a tightly controlled, air-gapped environment - - you have to configure every docker daemon that wants to access your registry - -## Using self-signed certificates - -> **Warning**: using this along with basic authentication requires you to **also** trust the certificate in the OS cert store for some versions of docker (see below) - -Generate your own certificate: - - mkdir -p certs && openssl req \ - -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ - -x509 -days 365 -out certs/domain.crt - -Be sure to use the name `myregistrydomain.com` as the CN.
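With those files in place, starting a TLS-enabled registry can look like the following — a minimal sketch, assuming the `registry:2` image and its `REGISTRY_HTTP_TLS_*` configuration overrides; the linked deploying guide below has the full recipe:

    # mount the certs/ directory generated above and point the registry at it
    docker run -d -p 5000:5000 --name registry \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2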
- -Use the result to [start your registry with TLS enabled](https://github.com/docker/distribution/blob/master/docs/deploying.md#get-a-certificate) - -Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`. - -Don't forget to restart docker after doing so. - -**Pros:** - - - more secure than the insecure registry solution - -**Cons:** - - - you have to configure every docker daemon that wants to access your registry - -## Failing... - -Failing to configure docker and trying to pull from a registry that is not using TLS will result in the following message: - -``` -FATA[0000] Error response from daemon: v1 ping attempt failed with error: -Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. -If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add -`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. -In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; -simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt -``` - -## Docker still complains about the certificate when using authentication? - -When using authentication, some versions of docker also require you to trust the certificate at the OS level. - -Usually, on Ubuntu this is done with: - - cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt - update-ca-certificates - -... and on Red Hat (and its derivatives) with: - - cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt - update-ca-trust - -... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled: - - update-ca-trust enable - -Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md deleted file mode 100644 index aefefc34..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md +++ /dev/null @@ -1,55 +0,0 @@ - - -# Understanding the Registry - -A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. - - > Example: the image `distribution/registry`, with tags `2.0` and `2.1`. - -Users interact with a registry by using docker push and pull commands. - - > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. - -Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, Ceph Rados, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). - -Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. - -The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. 
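To give a concrete flavor, a minimal `config.yml` fragment enabling both TLS and basic authentication might look roughly like this — a sketch following the stock configuration format, where the realm name and all paths are placeholders:

    auth:
      htpasswd:
        realm: basic-realm             # realm presented to clients
        path: /etc/registry/htpasswd   # htpasswd file holding your users
    http:
      addr: :5000
      tls:
        certificate: /certs/domain.crt
        key: /certs/domain.key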
- -Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics. - -## Understanding image naming - -Image names as used in typical docker commands reflect their origin: - - * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command - * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar` - -You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/). - -## Use cases - -Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. - -It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. - -Finally, it's the best way to distribute images inside an isolated network. - -## Requirements - -You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the CLI, and at least grasp basic concepts about networking. - -Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. A strong understanding of HTTP and overall network communications, plus familiarity with golang, are certainly useful as well for advanced operations or hacking. - -## Next - -Dive into [deploying your registry](deploying.md)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md deleted file mode 100644 index da0aba91..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md +++ /dev/null @@ -1,30 +0,0 @@ - - -# Migrating a 1.0 registry to 2.0 - -TODO: This needs to be revised in light of Olivier's work - -A few thoughts here: - -There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. -The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. -One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. - ------ - -The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process: - -1. Configure and test a 2.0 registry image in a sandbox environment. - -2. Back up your production image storage. - - Your production image storage should reside on a volume or storage backend. - Make sure you have a backup of its contents. - -3.
Stop your existing registry service. - -4. Restart your registry with your tested 2.0 image.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md deleted file mode 100644 index feb2630c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md +++ /dev/null @@ -1,72 +0,0 @@ - - -# Registry as a pull through cache - -## Use-case - -If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn't have, it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network. - -### Alternatives - -Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry. - -Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario. - -### Gotcha - -It's currently not possible to mirror another private registry. Only the central Hub can be mirrored. - -### Solution - -The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. - -## How does it work? - -The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. - -### What if the content changes on the Hub? - -When a pull is attempted with a tag, the Registry checks the remote to see whether it still has the latest version of the requested content. If it doesn't, it fetches the latest content and caches it. - -### What about my disk? - -In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache, the Registry periodically removes old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. - -To ensure the best performance and guarantee correctness, the Registry cache should be configured to use the `filesystem` driver for storage. - -## Running a Registry as a pull through cache - -The easiest way to run a registry as a pull through cache is to run the official Registry image. - -Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. - -### Configuring the cache - -To configure a Registry to run as a pull through cache, you must add a `proxy` section to the config file. - -In order to access private images on the Docker Hub, a username and password can be supplied. - - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -> **Warning**: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private!
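The same settings can alternatively be supplied as environment overrides when running the official image — a sketch, assuming the `registry:2` image's `REGISTRY_PROXY_*` variable mapping:

    # username/password are optional and only needed for private Hub content — see the warning above
    docker run -d -p 5000:5000 --name registry-mirror \
      -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
      -e REGISTRY_PROXY_USERNAME=myhubuser \
      -e REGISTRY_PROXY_PASSWORD=myhubpassword \
      registry:2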
### Configuring the Docker daemon

You will need to pass the `--registry-mirror` option to your Docker daemon on startup:

    docker --registry-mirror=https://<my-docker-mirror-host> daemon

For example, if your mirror is serving on https://10.0.0.2:5000, you would run:

    docker --registry-mirror=https://10.0.0.2:5000 daemon

NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`.

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md deleted file mode 100644 index 17b92f44..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md +++ /dev/null @@ -1,188 +0,0 @@

# Authenticating proxy with nginx

## Use-case

People already relying on an nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.

Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal HTTP portal.

### Alternatives

If you just want authentication for your registry, and are happy maintaining user access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth).

### Solution

With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.

While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example.

We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your needs.

### Gotchas

While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.

Furthermore, introducing an extra HTTP layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required.

For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client headers:

```
X-Real-IP
X-Forwarded-For
X-Forwarded-Proto
```

So if you have an nginx sitting behind it, you should remove these lines from the example config below:

```
X-Real-IP $remote_addr; # pass on real client's IP
X-Forwarded-For $proxy_add_x_forwarded_for;
X-Forwarded-Proto $scheme;
```

Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970).

## Setting things up

Read again [the requirements](recipes.md#requirements).

Ready?

--

Create the required directories

```
mkdir -p auth
mkdir -p data
```

Create the main nginx configuration you will use:

```
cat <<EOF > auth/nginx.conf
events {
    worker_connections  1024;
}

http {

  upstream docker-registry {
    server registry:5000;
  }

  ## Set a variable to help us decide if we need to add the
  ## 'Docker-Distribution-Api-Version' header.
  ## The registry always sets this header.
  ## In the case of nginx performing auth, the header will be unset
  ## since nginx is auth-ing before proxying.
  map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version {
    'registry/2.0' '';
    default registry/2.0;
  }

  server {
    listen 443 ssl;
    server_name myregistrydomain.com;

    # SSL
    ssl_certificate /etc/nginx/conf.d/domain.crt;
    ssl_certificate_key /etc/nginx/conf.d/domain.key;

    # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
    ssl_protocols TLSv1.1 TLSv1.2;
    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;

    # disable any limits to avoid HTTP 413 for large image uploads
    client_max_body_size 0;

    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
    chunked_transfer_encoding on;

    location /v2/ {
      # Do not allow connections from docker 1.5 and earlier
      # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
      if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) {
        return 404;
      }

      # To add basic authentication to v2 use auth_basic setting.
      auth_basic "Registry realm";
      auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;

      ## If \$docker_distribution_api_version is empty, the header will not be added.
      ## See the map directive above where this variable is defined.
      add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always;

      proxy_pass                          http://docker-registry;
      proxy_set_header  Host              \$http_host;   # required for docker client's sake
      proxy_set_header  X-Real-IP         \$remote_addr; # pass on real client's IP
      proxy_set_header  X-Forwarded-For   \$proxy_add_x_forwarded_for;
      proxy_set_header  X-Forwarded-Proto \$scheme;
      proxy_read_timeout                  900;
    }
  }
}
EOF
```

Now create a password file for "testuser" and "testpassword":

```
docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd
```

Copy over your certificate files:

```
cp domain.crt auth
cp domain.key auth
```

Now create your compose file:

```
cat <<EOF > docker-compose.yml
nginx:
  image: "nginx:1.9"
  ports:
    - 5043:443
  links:
    - registry:registry
  volumes:
    - ./auth:/etc/nginx/conf.d
    - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro

registry:
  image: registry:2
  ports:
    - 127.0.0.1:5000:5000
  volumes:
    - ./data:/var/lib/registry
EOF
```

## Starting and stopping

Now, start your stack:

    docker-compose up -d

Log in with a "push"-authorized user (using `testuser` and `testpassword`), then tag and push your first image:

    docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043
    docker tag ubuntu myregistrydomain.com:5043/test
    docker push myregistrydomain.com:5043/test
    docker pull myregistrydomain.com:5043/test

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md deleted file mode 100644 index a832239d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md +++ /dev/null @@ -1,320 +0,0 @@

# Notifications

The Registry supports sending webhook notifications in response to events happening within the registry. Notifications are sent in response to manifest pushes and pulls and layer pushes and pulls. These actions are serialized into events.
The events are queued into a registry-internal broadcast system which dispatches them to [_Endpoints_](#endpoints).

![](images/notifications.png)

## Endpoints

Notifications are sent to _endpoints_ via HTTP requests. Each configured endpoint has isolated queues, retry configuration and HTTP targets within each instance of a registry. When an action happens within the registry, it is converted into an event which is dropped into an in-memory queue. When the event reaches the end of the queue, an HTTP request is made to the endpoint until the request succeeds. The events are sent serially to each endpoint, but order is not guaranteed.

## Configuration

To set up a registry instance to send notifications to endpoints, one must add them to the configuration. A simple example follows:

      notifications:
        endpoints:
          - name: alistener
            url: https://mylistener.example.com/event
            headers:
              Authorization: [Bearer <token>]
            timeout: 500ms
            threshold: 5
            backoff: 1s

The above would configure the registry with an endpoint to send events to `https://mylistener.example.com/event`, with the header "Authorization: Bearer <token>". The request would time out after 500 milliseconds. If 5 failures happen consecutively, the registry will back off for 1 second before trying again.

For details on the fields, please see the [configuration documentation](configuration.md#notifications).

A properly configured endpoint should lead to a log message from the registry upon startup:

```
INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <token>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
```

## Events

Events have a well-defined JSON structure and are sent as the body of notification requests. One or more events are sent in a structure called an envelope. Each event has a unique ID that can be used to uniquely identify incoming requests, if required. Along with that, an _action_ is provided with a _target_, identifying the object mutated during the event.

The fields available in an event are described in detail in the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Event).

**TODO:** Let's break out the fields here rather than rely on the godoc.

The following is an example of a JSON event, sent in response to the push of a manifest:

```json
{
   "id": "asdf-asdf-asdf-asdf-0",
   "timestamp": "2006-01-02T15:04:05Z",
   "action": "push",
   "target": {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "size": 1,
      "digest": "sha256:0123456789abcdef0",
      "length": 1,
      "repository": "library/test",
      "url": "http://example.com/v2/library/test/manifests/latest"
   },
   "request": {
      "id": "asdfasdf",
      "addr": "client.local",
      "host": "registrycluster.local",
      "method": "PUT",
      "useragent": "test/0.1"
   },
   "actor": {
      "name": "test-actor"
   },
   "source": {
      "addr": "hostname.local:port"
   }
}
```

> __NOTE:__ As of version 2.1, the `length` field for event targets is being deprecated for the `size` field, bringing the target in line with common nomenclature. Both will continue to be set for the foreseeable future. Newer code should favor `size` but accept either.

## Envelope

The envelope contains one or more events, with the following JSON structure:

```json
{
   "events": [ ... ]
}
```
While events may be sent in the same envelope, the set of events within that envelope has no implied relationship. For example, the registry may choose to group unrelated events and send them in the same envelope to reduce the total number of requests.

The full package has the media type "application/vnd.docker.distribution.events.v1+json", which will be set on the request coming to an endpoint.

A full request carrying an envelope may look as follows:

```json
POST /callback
Host: mylistener.example.com
Authorization: Bearer <token>
Content-Type: application/vnd.docker.distribution.events.v1+json

{
   "events": [
      {
         "id": "asdf-asdf-asdf-asdf-0",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
            "length": 1,
            "digest": "sha256:0123456789abcdef0",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      },
      {
         "id": "asdf-asdf-asdf-asdf-1",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "length": 2,
            "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      },
      {
         "id": "asdf-asdf-asdf-asdf-2",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "length": 3,
            "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      }
   ]
}
```

## Responses

The registry is fairly accepting of the response codes from endpoints. If an endpoint responds with any 2xx or 3xx response code (after following redirects), the message will be considered delivered and discarded.

In turn, it is recommended that endpoints are accepting of incoming requests, as well. While the format of event envelopes is standardized by media type, any pickiness about validation may cause the queue to back up on the registry.
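A tolerant endpoint along these lines can be sketched in a few lines of Go. This is an illustration only, not part of the registry; the `/callback` path and port are assumptions that match the monitoring example below, and only a handful of the event fields described above are decoded.

```go
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
)

// Envelope mirrors the JSON structure described above; only the
// fields this sketch cares about are declared.
type Envelope struct {
	Events []struct {
		ID     string `json:"id"`
		Action string `json:"action"`
		Target struct {
			Repository string `json:"repository"`
			Digest     string `json:"digest"`
		} `json:"target"`
	} `json:"events"`
}

func main() {
	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		var env Envelope
		// Be accepting: log and still return 200 on parse errors so
		// the registry queue does not back up.
		if err := json.NewDecoder(r.Body).Decode(&env); err != nil && err != io.EOF {
			log.Printf("ignoring malformed envelope: %v", err)
			w.WriteHeader(http.StatusOK)
			return
		}
		for _, e := range env.Events {
			log.Printf("%s %s@%s", e.Action, e.Target.Repository, e.Target.Digest)
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```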
## Monitoring

The state of the endpoints is reported via the debug/vars HTTP interface, usually configured to `http://localhost:5001/debug/vars`. Information such as configuration and metrics is available by endpoint.

The following provides an example of a few endpoints that have experienced several failures and have since recovered:

```json
"notifications":{
   "endpoints":[
      {
         "name":"local-5003",
         "url":"http://localhost:5003/callback",
         "Headers":{
            "Authorization":[
               "Bearer \u003can example token\u003e"
            ]
         },
         "Timeout":1000000000,
         "Threshold":10,
         "Backoff":1000000000,
         "Metrics":{
            "Pending":76,
            "Events":76,
            "Successes":0,
            "Failures":0,
            "Errors":46,
            "Statuses":{
            }
         }
      },
      {
         "name":"local-8083",
         "url":"http://localhost:8083/callback",
         "Headers":null,
         "Timeout":1000000000,
         "Threshold":10,
         "Backoff":1000000000,
         "Metrics":{
            "Pending":0,
            "Events":76,
            "Successes":76,
            "Failures":0,
            "Errors":28,
            "Statuses":{
               "202 Accepted":76
            }
         }
      }
   ]
}
```

If using notifications as part of a larger application, it is _critical_ to monitor the size ("Pending" above) of the endpoint queues. If failures or queue sizes are increasing, it can indicate a larger problem.

The logs are also a valuable resource for monitoring problems. A failing endpoint will lead to messages similar to the following:

```
ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
```

The above indicates that several errors have led to a backoff, and the registry will wait before retrying.

## Considerations

Currently, the queues are in-memory, so endpoints should be _reasonably reliable_. They are designed to make a best effort to send the messages, but if an instance is lost, messages may be dropped. If an endpoint goes down, care should be taken to ensure that the registry instance is not terminated before the endpoint comes back up, or messages will be lost.

This can be mitigated by running endpoints in close proximity to the registry instances. One could run an endpoint that pages events to disk and then forwards them, providing better durability.

The notification system is designed around a series of interchangeable _sinks_ which can be wired up to achieve interesting behavior. If this system doesn't provide acceptable guarantees, adding a transactional `Sink` to the registry is a possibility, although it may have an effect on request service time. Please see the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) for more information.

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md deleted file mode 100644 index 15a26ff4..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md +++ /dev/null @@ -1,79 +0,0 @@

# OS X Setup Guide

## Use-case

This is useful if you intend to run a registry server natively on OS X.

### Alternatives

You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM.

The simplest way to get there is to use the [Docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](https://docs.docker.com/machine/), which usually relies on the [boot2docker](http://boot2docker.io/) ISO inside a VirtualBox VM.
### Solution

Using the method described here, you compile and install your own registry binary from the git repository and run it as an OS X agent.

### Gotchas

Operating production services on OS X is out of the scope of this document. Be sure you understand these aspects well before considering going to production with this.

## Set up golang on your machine

If you already have a working Go environment, skip safely to the next section.

If you don't, the TL;DR is:

    bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
    source ~/.gvm/scripts/gvm
    gvm install go1.4.2
    gvm use go1.4.2

If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html).

## Check out the Docker Distribution source tree

    mkdir -p $GOPATH/src/github.com/docker
    git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
    cd $GOPATH/src/github.com/docker/distribution

## Build the binary

    GOPATH=$(pwd)/Godeps/_workspace:$GOPATH make binaries
    sudo cp bin/registry /usr/local/libexec/registry

## Setup

Copy the registry configuration file into place:

    mkdir /Users/Shared/Registry
    cp docs/osx/config.yml /Users/Shared/Registry/config.yml

## Running the Docker Registry under launchd

Copy the Docker registry plist into place:

    plutil -lint docs/osx/com.docker.registry.plist
    cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
    chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist

Start the Docker registry:

    launchctl load ~/Library/LaunchAgents/com.docker.registry.plist

### Restarting the docker registry service

    launchctl stop com.docker.registry
    launchctl start com.docker.registry

### Unloading the docker registry service

    launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist deleted file mode 100644 index 0982349f..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist +++ /dev/null @@ -1,42 +0,0 @@

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.docker.registry</string>
    <key>KeepAlive</key>
    <true/>
    <key>StandardErrorPath</key>
    <string>/Users/Shared/Registry/registry.log</string>
    <key>StandardOutPath</key>
    <string>/Users/Shared/Registry/registry.log</string>
    <key>Program</key>
    <string>/usr/local/libexec/registry</string>
    <key>ProgramArguments</key>
    <array>
        <string>/usr/local/libexec/registry</string>
        <string>/Users/Shared/Registry/config.yml</string>
    </array>
    <key>Sockets</key>
    <dict>
        <key>http-listen-address</key>
        <dict>
            <key>SockServiceName</key>
            <string>5000</string>
            <key>SockType</key>
            <string>dgram</string>
            <key>SockFamily</key>
            <string>IPv4</string>
        </dict>
        <key>http-debug-address</key>
        <dict>
            <key>SockServiceName</key>
            <string>5001</string>
            <key>SockType</key>
            <string>dgram</string>
            <key>SockFamily</key>
            <string>IPv4</string>
        </dict>
    </dict>
</dict>
</plist>

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml deleted file mode 100644 index 63b8f713..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml +++ /dev/null @@ -1,16 +0,0 @@

version: 0.1
log:
  level: info
  fields:
    service: registry
    environment: macbook-air
storage:
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /Users/Shared/Registry
http:
  addr: 0.0.0.0:5000
  secret: mytokensecret
  debug:
    addr: localhost:5001

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md deleted file mode 100644 index 0b4a4ab5..00000000 ---
a/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md +++ /dev/null @@ -1,35 +0,0 @@

# Recipes

Here you will find a list of "recipes": end-to-end scenarios for exotic or otherwise advanced use-cases.

Most users are not expected to have a use for these.

## Requirements

You should have followed the basic [deployment guide](deploying.md) in its entirety.

If you have not, please take the time to do so.

At this point, it's assumed that:

 * you understand Docker security requirements, and how to configure your docker engines properly
 * you have installed Docker Compose
 * it's HIGHLY recommended that you get a certificate from a known CA instead of using self-signed certificates
 * inside the current directory, you have an X.509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com`
 * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`)

## The List

 * [using Apache as an authenticating proxy](apache.md)
 * [using Nginx as an authenticating proxy](nginx.md)
 * [running a Registry on OS X](osx-setup-guide.md)
 * [hacking the registry: build instructions](building.md)
 * [mirror the Docker Hub](mirror.md)
\ No newline at end of file

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md deleted file mode 100644 index ce4f0234..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md +++ /dev/null @@ -1,4845 +0,0 @@

# Docker Registry HTTP API V2

## Introduction

The _Docker Registry HTTP API_ is the protocol to facilitate distribution of images to the docker engine. It interacts with instances of the docker registry, which is a service to manage information about docker images and enable their distribution. The specification covers the operation of version 2 of this API, known as _Docker Registry HTTP API V2_.

While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this specification is a set of changes to the docker image format, covered in [docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties of the manifest format to improve performance, reduce bandwidth usage and decrease the likelihood of backend corruption.

For relevant details and history leading up to this specification, please see the following issues:

- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)

### Scope

This specification covers the URL layout and protocols of the interaction between docker registry and docker core. This will affect the docker core registry API and the rewrite of docker-registry. Docker registry implementations may implement other API endpoints, but they are not covered by this specification.
This includes the following features:

- Namespace-oriented URI Layout
- PUSH/PULL registry server for V2 image manifest format
- Resumable layer PUSH support
- V2 Client library implementation

While authentication and authorization support will influence this specification, details of the protocol will be left to a future specification. Relevant header definitions and error codes are present to provide an indication of what a client may encounter.

#### Future

There are features that have been discussed during the process of cutting this specification. The following is an incomplete list:

- Immutable image references
- Multiple architecture support
- Migration from v2compatibility representation

These may represent features that are either out of the scope of this specification, the purview of another specification, or have been deferred to a future version.

### Use Cases

For the most part, the use cases of the former registry API apply to the new version. Differentiating use cases are covered below.

#### Image Verification

A docker engine instance would like to run a verified image named "library/ubuntu", with the tag "latest". The engine contacts the registry, requesting the manifest for "library/ubuntu:latest". An untrusted registry returns a manifest. Before proceeding to download the individual layers, the engine verifies the manifest's signature, ensuring that the content was produced from a trusted source and no tampering has occurred. After each layer is downloaded, the engine verifies the digest of the layer, ensuring that the content matches that specified by the manifest.

#### Resumable Push

Company X's build servers lose connectivity to the docker registry before completing an image layer transfer. After connectivity returns, the build server attempts to re-upload the image. The registry notifies the build server that the upload has already been partially attempted. The build server responds by only sending the remaining data to complete the image file.

#### Resumable Pull

Company X is having more connectivity problems, but this time in their deployment datacenter. When downloading an image, the connection is interrupted before completion. The client keeps the partial data and uses HTTP `Range` requests to avoid downloading repeated data.

#### Layer Upload De-duplication

Company Y's build system creates two identical docker layers from build processes A and B. Build process A completes uploading the layer before B. When process B attempts to upload the layer, the registry indicates that it's not necessary because the layer is already known.

If process A and B upload the same layer at the same time, both operations will proceed and the first to complete will be stored in the registry (Note: we may modify this to prevent dogpile with some locking mechanism).

### Changes

The V2 specification has been written to work as a living document, specifying only what is certain and leaving what is not specified open or to future changes. Only non-conflicting additions should be made to the API, and accepted changes should avoid preventing future changes from happening.

This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here.
Each set of changes is given a letter corresponding to a set of modifications that were applied to the baseline specification. These are merely for reference and shouldn't be used outside the specification other than to identify a set of modifications.
- j:
  - Add ability to mount blobs across repositories.

- i:
  - Clarified expected behavior response to manifest HEAD request.

- h:
  - All mention of tarsum removed.

- g:
  - Clarified behavior of pagination with unspecified parameters.

- f:
  - Specify the delete API for layers and manifests.

- e:
  - Added support for listing registry contents.
  - Added pagination to tags API.
  - Added common approach to support pagination.

- d:
  - Allow repository name components to be one character.
  - Clarified that single component names are allowed.

- c:
  - Added section covering digest format.
  - Added more clarification that manifest cannot be deleted by tag.

- b:
  - Added capability of doing streaming upload to PATCH blob upload.
  - Updated PUT blob upload to no longer take final chunk; now requires entire data or no data.
  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.

- a:
  - Added support for immutable manifest references in manifest endpoints.
  - Deleting a manifest by tag has been deprecated.
  - Specified `Docker-Content-Digest` header for appropriate entities.
  - Added error code for unsupported operations.
## Overview

This section covers client flows and details of the API endpoints. The URI layout of the new API is structured to support a rich authentication and authorization model by leveraging namespaces. All endpoints will be prefixed by the API version and the repository name:

    /v2/<name>/

For example, for an API endpoint that will work with the `library/ubuntu` repository, the URI prefix will be:

    /v2/library/ubuntu/

This scheme provides rich access control over various operations and methods using the URI prefix and HTTP methods that can be controlled in a variety of ways.

Classically, repository names have always been two path components, where each path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows:

1. A repository name is broken up into _path components_. A component of a repository name must consist of at least one lowercase alphanumeric character, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
2. If a repository name has two or more path components, they must be separated by a forward slash ("/").
3. The total length of a repository name, including slashes, must be less than 256 characters.

These name requirements _only_ apply to the registry API, which should accept a superset of what is supported by other docker ecosystem components.

All endpoints should support aggressive HTTP caching, compression and range headers, where appropriate. The new API attempts to leverage HTTP semantics where possible but may break from standards to implement targeted features.

For detail on individual endpoints, please see the [_Detail_](#detail) section.

### Errors

Actionable failure conditions, covered in detail in their relevant sections, are reported as part of 4xx responses, in a JSON response body. One or more errors will be returned in the following format:

    {
        "errors": [{
                "code": <error identifier>,
                "message": <message describing condition>,
                "detail": <unstructured>
            },
            ...
        ]
    }

The `code` field will be a unique identifier, all caps with underscores by convention. The `message` field will be a human readable string. The optional `detail` field may contain arbitrary JSON data providing information the client can use to resolve the issue.

While the client can take action on certain error codes, the registry may add new error codes over time. All client implementations should treat unknown error codes as `UNKNOWN`, allowing future error codes to be added without breaking API compatibility. For the purposes of the specification, error codes will only be added and never removed.

For a complete account of all error codes, please see the _Detail_ section.
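To illustrate, a client might decode the error body with a structure like the following Go sketch. Nothing here is mandated by the specification; the type and field names are illustrative, and `detail` is kept as raw JSON since its shape depends on the error code.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// apiError mirrors one entry of the "errors" array described above.
type apiError struct {
	Code    string          `json:"code"`
	Message string          `json:"message"`
	Detail  json.RawMessage `json:"detail,omitempty"`
}

type apiErrors struct {
	Errors []apiError `json:"errors"`
}

func main() {
	body := []byte(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`)

	var errs apiErrors
	if err := json.Unmarshal(body, &errs); err != nil {
		panic(err)
	}
	for _, e := range errs.Errors {
		// Per the text above, treat unrecognized codes as UNKNOWN.
		fmt.Printf("%s: %s\n", e.Code, e.Message)
	}
}
```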
### API Version Check

A minimal endpoint, mounted at `/v2/`, will provide version support information based on its response statuses. The request format is as follows:

    GET /v2/

If a `200 OK` response is returned, the registry implements the V2(.1) registry API and the client may proceed safely with other V2 operations. Optionally, the response may contain information about the supported paths in the response body. The client should be prepared to ignore this data.

If a `401 Unauthorized` response is returned, the client should take action based on the contents of the "WWW-Authenticate" header and try the endpoint again. Depending on access control setup, the client may still have to authenticate against different resources, even if this check succeeds.

If a `404 Not Found` response status, or other unexpected status, is returned, the client should proceed with the assumption that the registry does not implement V2 of the API.

When a `200 OK` or `401 Unauthorized` response is returned, the "Docker-Distribution-API-Version" header should be set to "registry/2.0". Clients may require this header value to determine if the endpoint serves this API. When this header is omitted, clients may fall back to an older API version.

### Content Digests

This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). The core of this design is the concept of a content addressable identifier. It uniquely identifies content by taking a collision-resistant hash of the bytes. Such an identifier can be independently calculated and verified by selection of a common _algorithm_. If such an identifier can be communicated in a secure manner, one can retrieve the content from an insecure source, calculate it independently and be certain that the correct content was obtained. Put simply, the identifier is a property of the content.

To disambiguate from other concepts, we call this identifier a _digest_. A _digest_ is a serialized hash result, consisting of an _algorithm_ and a _hex_ portion. The _algorithm_ identifies the methodology used to calculate the digest. The _hex_ portion is the hex-encoded result of the hash.

We define a _digest_ string to match the following grammar:

```
digest    := algorithm ":" hex
algorithm := /[A-Fa-f0-9_+.-]+/
hex       := /[A-Fa-f0-9]+/
```

Some examples of _digests_ include the following:

digest                                                                  | description
------------------------------------------------------------------------|------------------------------------------------
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b  | Common sha256 based digest

While the _algorithm_ does allow one to implement a wide variety of algorithms, compliant implementations should use sha256. Heavy processing of input before calculating a hash is discouraged to avoid degrading the uniqueness of the _digest_, but some canonicalization may be performed to ensure consistent identifiers.

Let's use a simple example in pseudo-code to demonstrate a digest calculation:

```
let C = 'a small string'
let B = sha256(C)
let D = 'sha256:' + EncodeHex(B)
let ID(C) = D
```

Above, we have bytestring `C` passed into the function `sha256`, which returns a bytestring `B`, the hash of `C`. `D` gets the algorithm concatenated with the hex encoding of `B`. We then define the identifier of `C`, `ID(C)`, as equal to `D`. A digest can be verified by independently calculating `D` and comparing it with the identifier `ID(C)`.
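The same calculation can be written directly in Go. This is a sketch of the pseudo-code above, not a normative implementation; the function name is arbitrary.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digest computes a serialized digest string ("<algorithm>:<hex>")
// for a byte string, following the grammar above.
func digest(content []byte) string {
	sum := sha256.Sum256(content)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(digest([]byte("a small string")))
}
```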
#### Digest Header

To provide verification of HTTP content, any response may include a `Docker-Content-Digest` header. This will include the digest of the target entity returned in the response. For blobs, this is the entire blob content. For manifests, this is the manifest body without the signature content, also known as the JWS payload. Note that the commonly used canonicalization for digest calculation may depend on the media type of the content, such as with manifests.

The client may choose to ignore the header or may verify it to ensure content integrity and transport security. This is most important when fetching by a digest. To ensure security, the content should be verified against the digest used to fetch the content. At times, the returned digest may differ from that used to initiate a request. Such digests are considered to be from different _domains_, meaning they have different values for _algorithm_. In such a case, the client may choose to verify the digests in both domains or ignore the server's digest. To maintain security, the client _must_ always verify the content against the _digest_ used to fetch the content.

> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use the same digest used to fetch the content to verify it. The header `Docker-Content-Digest` should not be trusted over the "local" digest.

### Pulling An Image

An "image" is a combination of a JSON manifest and individual layer files. The process of pulling an image centers around retrieving these two components.

The first step in pulling an image is to retrieve the manifest. For reference, the relevant manifest fields for the registry are the following:

field     | description
----------|------------------------------------------------
name      | The name of the image.
tag       | The tag for this version of the image.
fsLayers  | A list of layer descriptors (including digest)
signature | A JWS used to verify the manifest content

For more information about the manifest format, please see [docker/docker#8093](https://github.com/docker/docker/issues/8093).

When the manifest is in hand, the client must verify the signature to ensure the names and layers are valid. Once confirmed, the client will then use the digests to download the individual layers. Layers are stored as blobs in the V2 registry API, keyed by their digest.

#### Pulling an Image Manifest

The image manifest can be fetched with the following url:

```
GET /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the image manifest will be returned, with the following format (see docker/docker#8093 for details):

    {
       "name": <name>,
       "tag": <tag>,
       "fsLayers": [
          {
             "blobSum": <digest>
          },
          ...
       ],
       "history": <v1 images>,
       "signature": <JWS>
    }

The client should verify the returned manifest signature for authenticity before fetching layers.

##### Existing Manifests

The image manifest can be checked for existence with the following url:

```
HEAD /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the response will be as follows:

```
200 OK
Content-Length: <length of manifest>
Docker-Content-Digest: <digest>
```

#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by digest. Pulling a layer is carried out by a standard HTTP request. The URL is as follows:

    GET /v2/<name>/blobs/<digest>

Access to a layer will be gated by the `name` of the repository but is identified uniquely in the registry by `digest`.

This endpoint may issue a 307 (302 for clients below HTTP 1.1) redirect to another service for downloading the layer, and clients should be prepared to handle redirects.

### Pushing An Image

Pushing an image works in the opposite order as a pull. After assembling the image manifest, the client must first push the individual layers. When the layers are fully pushed into the registry, the client should upload the signed manifest.

The details of each step of the process are covered in the following sections.

#### Pushing a Layer

All layer uploads use two steps to manage the upload process. The first step starts the upload in the registry service, returning a URL to carry out the second step. The second step uses the upload URL to transfer the actual data. Uploads are started with a POST request which returns a URL that can be used to push data and check upload status.

##### Starting An Upload

To begin the process, a POST request should be issued in the following format:

```
POST /v2/<name>/blobs/uploads/
```

The parameters of this request are the image namespace under which the layer will be linked.
Responses to this request are covered below.

##### Existing Layers

The existence of a layer can be checked via a `HEAD` request to the blob store API. The request should be formatted as follows:

```
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the digest specified in `digest` is available, a 200 OK response will be received, with no actual body content (this is according to the HTTP specification). The response will look as follows:

```
200 OK
Content-Length: <length of blob>
Docker-Content-Digest: <digest>
```

When this response is received, the client can assume that the layer is already available in the registry under the given name and should take no further action to upload the layer. Note that the binary digests may differ for the existing registry layer, but the digests will be guaranteed to match.

##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned URL, called the "Upload URL" from the `Location` header. All responses to the upload URL, whether sending data or getting status, will be in this format. Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location` header is specified, clients should treat it as an opaque URL and should never try to assemble it. While the `uuid` parameter may be an actual UUID, this proposal imposes no constraints on the format and clients should never impose any.

If clients need to correlate local upload state with remote upload state, the contents of the `Docker-Upload-UUID` header should be used. Such an ID can be used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated through the `Range` header. While this is a non-standard use of the `Range` header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. For an upload that just started, for an example with a 1000 byte layer file, the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except it will return a 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be honored, even in non-standard use cases.

##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be favored by clients that would like to avoid the complexity of chunking. To carry out a "monolithic" upload, one can simply put the entire content blob to the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

The "digest" parameter must be included with the PUT request. Please see the _Completed Upload_ section for details on the parameters and expected responses.
Additionally, the upload can be completed with a single `POST` request to the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

On the registry service, this should allocate a download, accept and verify the data, and return the same response as the final chunk of an upload. If the POST request fails collecting the data in any way, the registry should attempt to return an error response to the client with the `Location` header providing a place to continue the download.

The single `POST` method is provided for convenience, and most clients should implement `POST` + `PUT` to support reliable resume of uploads.

##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Layer Chunk Binary Data>
```

There is no enforcement on layer chunk splits other than that the server must receive them in order. The server may enforce a minimum chunk size. If the server cannot accept the chunk, a `416 Requested Range Not Satisfiable` response will be returned and will include a `Range` header indicating the current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid range" and upload the subsequent chunk. A 416 will be returned under the following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT` request on the upload endpoint with a digest parameter. If it is not provided, the upload will not be considered complete. The format for the final chunk will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Last Layer Chunk Binary Data>
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a `digest` parameter and zero-length body may be sent to complete and validate the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them but _must_ notify the client if the content is rejected.

When the last chunk is received and the layer has been validated, the client will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob, which may differ from the provided digest. Most clients may ignore the value, but if it is used, the client should verify the value against the uploaded blob data.
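Before covering the digest parameter in more detail, here is a minimal Go sketch of the monolithic POST + PUT flow just described. It is a non-normative illustration under stated assumptions: the registry address and repository name in `main` are hypothetical, and error handling is kept minimal. Note that the `Location` header is treated as opaque and only has the `digest` parameter appended to it.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlob starts an upload with POST, then completes it by PUTting
// the entire blob to the returned upload URL with a digest parameter.
func pushBlob(registry, name string, blob []byte) error {
	sum := sha256.Sum256(blob)
	digest := "sha256:" + hex.EncodeToString(sum[:])

	// Step 1: start the upload.
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("starting upload: %s", resp.Status)
	}

	// Treat the Location header as an opaque URL; it may be relative.
	base, err := url.Parse(registry)
	if err != nil {
		return err
	}
	loc, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}
	u := base.ResolveReference(loc)
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	// Step 2: complete the upload with the entire content.
	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("completing upload: %s", resp.Status)
	}
	return nil
}

func main() {
	// Hypothetical registry and repository; both are assumptions.
	if err := pushBlob("http://localhost:5000", "library/test", []byte("layer data")); err != nil {
		panic(err)
	}
}
```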
###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support verification of a successful transfer. For example, an HTTP URI parameter might be as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does match this digest.

##### Canceling an Upload

An upload can be cancelled by issuing a DELETE request to the upload endpoint. The format will be as follows:

```
DELETE /v2/<name>/blobs/uploads/<uuid>
```

After this request is issued, the upload uuid will no longer be valid and the registry server will dump all intermediate data. While uploads will time out if not completed, clients should issue this request if they encounter a fatal error but still have the ability to issue an HTTP request.

##### Cross Repository Blob Mount

A blob may be mounted from another repository that the client has read access to, removing the need to upload a blob already known to the registry. To issue a blob mount instead of an upload, a POST request should be issued in the following format:

```
POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
Content-Length: 0
```

If the blob is successfully mounted, the client will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob, which may differ from the provided digest. Most clients may ignore the value, but if it is used, the client should verify the value against the uploaded blob data.

If a mount fails due to invalid repository or digest arguments, the registry will fall back to the standard upload behavior and return a `202 Accepted` with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

This behavior is consistent with older versions of the registry, which do not recognize the repository mount query parameters.

Note: a client may issue a HEAD request to check the existence of a blob in a source repository, to distinguish between the registry not supporting blob mounts and the blob not existing in the expected repository.

##### Errors

If a 502, 503 or 504 error is received, the client should assume the failure is due to a temporary condition and the upload can proceed later, honoring the appropriate retry mechanism. Other 5xx errors should be treated as terminal.

If there is a problem with the upload, a 4xx error will be returned indicating the problem. After receiving a 4xx response (except 416, as called out above), the upload will be considered failed and the client should take appropriate action.

Note that the upload URL will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process.

### Deleting a Layer

A layer may be deleted from the registry via its `name` and `digest`. A delete may be issued with the following request format:

    DELETE /v2/<name>/blobs/<digest>

If the blob exists and has been successfully deleted, the following response will be issued:

    202 Accepted
    Content-Length: None

If the blob had already been deleted or did not exist, a `404 Not Found` response will be issued instead.

If a layer is deleted which is referenced by a manifest in the registry, the complete images will not be resolvable.
#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the image manifest. An image can be pushed using the following request format:

    PUT /v2/<name>/manifests/<reference>

    {
       "name": <name>,
       "tag": <tag>,
       "fsLayers": [
          {
             "blobSum": <digest>
          },
          ...
       ],
       "history": <v1 images>,
       "signature": <JWS>,
       ...
    }

The `name` and `reference` fields of the response body must match those specified in the URL. The `reference` field may be a "tag" or a "digest".

If there is a problem with pushing the manifest, a relevant 4xx response will be returned with a JSON error message. Please see the _PUT Manifest_ section for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are returned. The `detail` field of the error response will have a `digest` field identifying the missing blob. An error is returned for each unknown blob. The response format is as follows:

    {
        "errors": [{
                "code": "BLOB_UNKNOWN",
                "message": "blob unknown to registry",
                "detail": {
                    "digest": <digest>
                }
            },
            ...
        ]
    }

### Listing Repositories

Images are stored in collections, known as a _repository_, which is keyed by a `name`, as seen throughout the API specification. A registry instance may contain several repositories. The list of available repositories is made available through the _catalog_.

The catalog for a given registry can be retrieved with the following request:

```
GET /v2/_catalog
```

The response will be in the following format:

```
200 OK
Content-Type: application/json

{
  "repositories": [
    <name>,
    ...
  ]
}
```

Note that the contents of the response are specific to the registry implementation. Some registries may opt to provide a full catalog output, limit it based on the user's access level or omit upstream results, if providing mirroring functionality. Subsequently, the presence of a repository in the catalog listing only means that the registry *may* provide access to the repository at the time of the request. Conversely, a missing entry does *not* mean that the registry does not have the repository. More succinctly, the catalog indicates what may be available, not what is absent.

For registries with a large number of repositories, this response may be quite large. If such a response is expected, one should use pagination. A registry may also limit the amount of responses returned even if pagination was not explicitly requested. In this case the `Link` header will be returned along with the results, and subsequent results can be obtained by following the link as if pagination had been initially requested.

For details of the `Link` header, please see the _Pagination_ section.

#### Pagination

Paginated catalog results can be retrieved by adding an `n` parameter to the request URL, declaring that the response should be limited to `n` results. Starting a paginated flow begins as follows:

```
GET /v2/_catalog?n=<integer>
```

The above specifies that a catalog response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"

{
  "repositories": [
    <name>,
    ...
  ]
}
```

The above includes the _first_ `n` entries from the result set. To get the _next_ `n` entries, one can create a URL where the argument `last` has the value from `repositories[len(repositories)-1]`. If there are indeed more results, the URL for the next block is encoded in an [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" relation. The presence of the `Link` header communicates to the client that the entire result set has not been returned and another request must be issued. If the header is not present, the client can assume that all results have been received.

> __NOTE:__ In the request template above, note that the brackets are required. For example, if the url is `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
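A client walking the catalog this way can be sketched in Go as follows. This is illustrative only; the registry address and page size are assumptions, and the `Link` header is parsed with a simple regular expression rather than a full RFC5988 parser.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"regexp"
)

type catalog struct {
	Repositories []string `json:"repositories"`
}

// linkNext extracts the URL of the rel="next" link, if any.
var linkNext = regexp.MustCompile(`<([^>]+)>;\s*rel="next"`)

func main() {
	// Hypothetical registry address and page size.
	cur, err := url.Parse("http://localhost:5000/v2/_catalog?n=2")
	if err != nil {
		panic(err)
	}

	for cur != nil {
		resp, err := http.Get(cur.String())
		if err != nil {
			panic(err)
		}

		var c catalog
		if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
			resp.Body.Close()
			panic(err)
		}
		resp.Body.Close()

		for _, repo := range c.Repositories {
			fmt.Println(repo)
		}

		// Follow the Link header; its URL may be relative, so resolve
		// it against the current request URL.
		var next *url.URL
		if m := linkNext.FindStringSubmatch(resp.Header.Get("Link")); m != nil {
			if u, err := url.Parse(m[1]); err == nil {
				next = cur.ResolveReference(u)
			}
		}
		cur = next
	}
}
```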
Compliant client implementations should always use the `Link` header value when proceeding through results linearly. The client may construct URLs to skip forward in the catalog.

To get the next result set, a client would issue the request as follows, using the URL encoded in the described `Link` header:

```
GET /v2/_catalog?n=<n from the request>&last=<last repository in response>
```

The above process should then be repeated until the `Link` header is no longer set.

The catalog result set is represented abstractly as a lexically sorted list, where the position in that list can be specified by the query term `last`. The entries in the response start _after_ the term specified by `last`, up to `n` entries.

The behavior of `last` is quite simple when demonstrated with an example. Let us say the registry has the following repositories:

```
a
b
c
d
```

If the value of `n` is 2, _a_ and _b_ will be returned on the first response. The `Link` header returned on the response will have `n` set to 2 and `last` set to _b_:

```
Link: <<url>?n=2&last=b>; rel="next"
```

The client can then issue the request with the above value from the `Link` header, receiving the values _c_ and _d_. Note that `n` may change on the second-to-last response or be omitted fully, if the server so chooses.

### Listing Image Tags

It may be necessary to list all of the tags under a given repository. The tags for an image repository can be retrieved with the following request:

    GET /v2/<name>/tags/list

The response will be in the following format:

    200 OK
    Content-Type: application/json

    {
        "name": <name>,
        "tags": [
            <tag>,
            ...
        ]
    }

For repositories with a large number of tags, this response may be quite large. If such a response is expected, one should use pagination.

#### Pagination

Paginated tag results can be retrieved by adding the appropriate parameters to the request URL described above. The behavior of tag pagination is identical to that specified for catalog pagination. We cover a simple flow to highlight any differences.

Starting a paginated flow may begin as follows:

```
GET /v2/<name>/tags/list?n=<integer>
```

The above specifies that a tags response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"

{
    "name": <name>,
    "tags": [
        <tag>,
        ...
    ]
}
```
To get the next result set, a client would issue the request as follows, using the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header:

```
GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
```

The above process should then be repeated until the `Link` header is no longer set in the response. The behavior of the `last` parameter, the provided response result, lexical ordering and encoding of the `Link` header are identical to that of catalog pagination.

### Deleting an Image

An image may be deleted from the registry via its `name` and `reference`. A delete may be issued with the following request format:

    DELETE /v2/<name>/manifests/<reference>

For deletes, `reference` *must* be a digest or the delete will fail. If the image exists and has been successfully deleted, the following response will be issued:

    202 Accepted
    Content-Length: None

If the image had already been deleted or did not exist, a `404 Not Found` response will be issued instead.

## Detail

> **Note**: This section is still under construction. For the purposes of implementation, if any details below differ from the described request flows above, the section below should be corrected. When they match, this note should be removed.

The behavior of the endpoints is covered in detail in this section, organized by route and entity. All aspects of the requests and responses are covered, including headers, parameters and body formats. Examples of requests and their corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example request, a description of the request, followed by information about that request.

A list of methods and URIs are covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. |
| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| DELETE | `/v2/<name>/blobs/<digest>` | Blob | Delete the blob identified by `name` and `digest` |
| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
## Detail

> **Note**: This section is still under construction. For the purposes of
> implementation, if any details below differ from the described request flows
> above, the section below should be corrected. When they match, this note
> should be removed.

The behavior of the endpoints is covered in detail in this section, organized
by route and entity. All aspects of the requests and responses are covered,
including headers, parameters and body formats. Examples of requests and their
corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example
> request, a description of the request, followed by information about that
> request.

A list of methods and URIs is covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. |
| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| DELETE | `/v2/<name>/blobs/<digest>` | Blob | Delete the blob identified by `name` and `digest`. |
| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
| PUT | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. |
| DELETE | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out. |
| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, JSON list of repositories available in the registry. |

The detail for each endpoint is covered in the following sections.

### Errors

The error codes encountered via the API are enumerated in the following table:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. |
| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. |
| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |

### Base

Base V2 API route. Typically, this can be used for lightweight version checks
and to validate registry authentication.
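Since the base route takes no parameters, the version check reduces to a single `GET` and a switch on the status code. A minimal Go sketch, assuming an unauthenticated registry at a hypothetical local address:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical registry address; production registries normally require
	// TLS and authentication.
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK: // 200: the registry implements the V2 API
		fmt.Println("V2 API available")
	case http.StatusUnauthorized: // 401: the challenge says how to authenticate
		fmt.Println("authentication required:", resp.Header.Get("WWW-Authenticate"))
	case http.StatusNotFound: // 404: V2 API not implemented
		fmt.Println("V2 API not implemented")
	default:
		fmt.Println("unexpected response:", resp.Status)
	}
}
```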
- - - -#### GET Base - -Check that the endpoint implements Docker Registry API V2. - - - -``` -GET /v2/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| - - - - -###### On Success: OK - -``` -200 OK -``` - -The API implements V2 protocol and is accessible. - - - - -###### On Failure: Not Found - -``` -404 Not Found -``` - -The registry does not implement the V2 API. - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - - - -### Tags - -Retrieve information about tags. - - - -#### GET Tags - -Fetch the tags under the repository identified by `name`. - - -##### Tags - -``` -GET /v2//tags/list -Host: -Authorization: -``` - -Return all tags for the repository - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -A list of tags for the named repository. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
- ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Tags Paginated - -``` -GET /v2//tags/list?n=&last= -``` - -Return a portion of the tags for the specified repository. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`name`|path|Name of the target repository.| -|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| -|`last`|query|Result set will include values lexically after last.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tags": [ - , - ... - ], -} -``` - -A list of tags for the named repository. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| -|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| - - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |

###### On Failure: Access Denied

```
403 Forbidden
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have required access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |

### Manifest

Create, update, delete and retrieve manifests.

#### GET Manifest

Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.

```
GET /v2/<name>/manifests/<reference>
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`reference`|path|Tag or digest of the target manifest.|

###### On Success: OK

```
200 OK
Docker-Content-Digest: <digest>
Content-Type: application/json; charset=utf-8

{
    "name": <name>,
    "tag": <tag>,
    "fsLayers": [
        {
            "blobSum": "<digest>"
        },
        ...
    ],
    "history": <v1 images>,
    "signature": <JWS>
}
```

The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|
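Because `HEAD` is also supported on this route, an existence check does not require transferring the manifest body. A sketch in Go, assuming a local unauthenticated registry and the hypothetical repository `library/ubuntu`:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical repository and tag on a local registry.
	url := "http://localhost:5000/v2/library/ubuntu/manifests/latest"
	req, err := http.NewRequest(http.MethodHead, url, nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusOK {
		// Docker-Content-Digest names the canonical digest of the manifest,
		// which the client can use to re-fetch the same content immutably.
		fmt.Println("manifest digest:", resp.Header.Get("Docker-Content-Digest"))
	} else {
		fmt.Println("manifest unavailable:", resp.Status)
	}
}
```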
###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The name or reference was invalid.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |

###### On Failure: Authentication Required

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client is not authenticated.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. |

###### On Failure: No Such Repository Error

```
404 Not Found
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The repository is not known to the registry.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |

###### On Failure: Access Denied

```
403 Forbidden
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have required access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |

#### PUT Manifest

Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.

```
PUT /v2/<name>/manifests/<reference>
Host: <registry host>
Authorization: <scheme> <token>
Content-Type: application/json; charset=utf-8

{
    "name": <name>,
    "tag": <tag>,
    "fsLayers": [
        {
            "blobSum": "<digest>"
        },
        ...
    ],
    "history": <v1 images>,
    "signature": <JWS>
}
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`reference`|path|Tag or digest of the target manifest.|

###### On Success: Created

```
201 Created
Location: <url>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The canonical location url of the uploaded manifest.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Failure: Invalid Manifest

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```
- ] -} -``` - -The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | -| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | -| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -###### On Failure: Missing Layer(s) - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -} -``` - -One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - - -#### DELETE Manifest - -Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. - - - -``` -DELETE /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: Accepted - -``` -202 Accepted -``` - - - - - - -###### On Failure: Invalid Name or Reference - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` were invalid and the delete was unable to proceed. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -###### On Failure: Unknown Manifest - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - - - -### Blob - -Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. - - - -#### GET Blob - -Retrieve the blob from the registry identified by `digest`. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. - - -##### Fetch Blob - -``` -GET /v2//blobs/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Docker-Content-Digest: -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. The blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob content.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - -###### On Success: Temporary Redirect - -``` -307 Temporary Redirect -Location: -Docker-Content-Digest: -``` - -The blob identified by `digest` is available at the provided location. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location where the layer should be accessible.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The blob, identified by `name` and `digest`, is unknown to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Fetch Blob Part - -``` -GET /v2//blobs/ -Host: -Authorization: -Range: bytes=- -``` - -This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Range`|header|HTTP Range header specifying blob chunk.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: Partial Content - -``` -206 Partial Content -Content-Length: -Content-Range: bytes -/ -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob chunk.| -|`Content-Range`|Content range of blob chunk.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
- ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### DELETE Blob - -Delete the blob identified by `name` and `digest` - - - -``` -DELETE /v2//blobs/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Content-Length: 0 -Docker-Content-Digest: -``` - - - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|0| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The blob, identified by `name` and `digest`, is unknown to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Method Not Allowed - -``` -405 Method Not Allowed -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
    ]
}
```

Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |

###### On Failure: Authentication Required

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client is not authenticated.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. |

###### On Failure: No Such Repository Error

```
404 Not Found
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The repository is not known to the registry.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |

###### On Failure: Access Denied

```
403 Forbidden
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have required access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |

### Initiate Blob Upload

Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.

#### POST Initiate Blob Upload

Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.

##### Initiate Monolithic Blob Upload

```
POST /v2/<name>/blobs/uploads/?digest=<digest>
Host: <registry host>
Authorization: <scheme> <token>
Content-Length: <length of blob>
Content-Type: application/octet-stream

<binary data>
```

Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.
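A monolithic upload, where supported, is therefore a single POST carrying the blob and its digest. The Go sketch below computes the digest client-side and issues the request; the registry address and repository are hypothetical, and registries that do not support single-request uploads may instead answer `202 Accepted` with a `Location` to be completed via the resumable flow.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical blob content, registry and repository.
	blob := []byte("example layer content")
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))

	u := "http://localhost:5000/v2/library/ubuntu/blobs/uploads/?digest=" +
		url.QueryEscape(digest)
	req, err := http.NewRequest(http.MethodPost, u, bytes.NewReader(blob))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated: // 201: the blob now exists under its digest
		fmt.Println("blob created at:", resp.Header.Get("Location"))
	case http.StatusAccepted: // 202: server opened a resumable session instead
		fmt.Println("resumable upload started at:", resp.Header.Get("Location"))
	default:
		fmt.Println("upload failed:", resp.Status)
	}
}
```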
- - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|| -|`name`|path|Name of the target repository.| -|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Upload-UUID: -``` - -The blob has been created in the registry and is available at the provided location. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Initiate Resumable Blob Upload - -``` -POST /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Initiate a resumable blob upload with an empty request body. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Content-Length: 0 -Location: /v2//blobs/uploads/ -Range: 0-0 -Docker-Upload-UUID: -``` - -The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." 
-Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Mount Blob - -``` -POST /v2//blobs/uploads/?mount=&from= -Host: -Authorization: -Content-Length: 0 -``` - -Mount a blob identified by the `mount` parameter from another repository. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| -|`mount`|query|Digest of blob to mount from the source repository.| -|`from`|query|Name of the source repository.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Upload-UUID: -``` - -The blob has been mounted in the repository and is available at the provided location. 
- -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Blob Upload - -Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. - - - -#### GET Blob Upload - -Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. - - - -``` -GET /v2//blobs/uploads/ -Host: -Authorization: -``` - -Retrieve the progress of the current upload, as reported by the `Range` header. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Progress - -``` -204 No Content -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The upload is known and in progress. The last received offset is available in the `Range` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. 
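In practice a client reacts to exactly these two outcomes when resuming. A hedged Go sketch of the status probe follows; the upload URL is a hypothetical `Location` value previously returned by the registry, authentication is omitted, and an optional `bytes=` prefix on `Range` is tolerated since both forms appear in this document:

```
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

// uploadStatus reports the next byte offset to send for an in-progress upload,
// or restart=true if the registry no longer knows the upload (404).
func uploadStatus(uploadURL string) (next int64, restart bool, err error) {
	resp, err := http.Get(uploadURL)
	if err != nil {
		return 0, false, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNoContent: // 204: progress reported in the Range header
		r := resp.Header.Get("Range") // inclusive form "0-<offset>"
		r = strings.TrimPrefix(strings.TrimPrefix(r, "bytes="), "0-")
		last, perr := strconv.ParseInt(r, 10, 64)
		if perr != nil {
			return 0, false, fmt.Errorf("unparseable Range %q: %v", resp.Header.Get("Range"), perr)
		}
		return last + 1, false, nil // resume one byte past the last received offset
	case http.StatusNotFound: // 404: upload unknown, restart from scratch
		return 0, true, nil
	default:
		return 0, false, fmt.Errorf("unexpected status: %s", resp.Status)
	}
}

func main() {
	// Hypothetical upload URL taken from a prior Location header.
	fmt.Println(uploadStatus("https://registry.example.com/v2/myorg/app/blobs/uploads/some-uuid"))
}
```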
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### PATCH Blob Upload - -Upload a chunk of data for the specified upload. - - -##### Stream upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Type: application/octet-stream - - -``` - -Upload a stream of data to upload without completing the upload. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Data Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. 
| - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Chunked upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Range: - -Content-Length: -Content-Type: application/octet-stream - - -``` - -Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Chunk Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
- ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### PUT Blob Upload - -Complete the upload specified by `uuid`, optionally appending the body as the final chunk. - - - -``` -PUT /v2//blobs/uploads/?digest= -Host: -Authorization: -Content-Length: -Content-Type: application/octet-stream - - -``` - -Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| -|`digest`|query|Digest of uploaded blob.| - - - - -###### On Success: Upload Complete - -``` -204 No Content -Location: -Content-Range: - -Content-Length: 0 -Docker-Content-Digest: -``` - -The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location of the blob for retrieval| -|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. 
| - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### DELETE Blob Upload - -Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. - - - -``` -DELETE /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Cancel the upload specified by `uuid`. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Deleted - -``` -204 No Content -Content-Length: 0 -``` - -The upload has been successfully deleted. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -An error was encountered processing the delete. The client may ignore this error. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Catalog - -List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. - - - -#### GET Catalog - -Retrieve a sorted, json list of repositories available in the registry. - - -##### Catalog Fetch Complete - -``` -GET /v2/_catalog -``` - -Request an unabridged list of repositories available. - - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "repositories": [ - , - ... - ] -} -``` - -Returns the unabridged list of repositories as a json response. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -##### Catalog Fetch Paginated - -``` -GET /v2/_catalog?n=&last= -``` - -Return the specified portion of repositories. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| -|`last`|query|Result set will include values lexically after last.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 - -{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -} -``` - - - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| -|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| - - - - - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl deleted file mode 100644 index 544b0bfe..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl +++ /dev/null @@ -1,1204 +0,0 @@ - - -# Docker Registry HTTP API V2 - -## Introduction - -The _Docker Registry HTTP API_ is the protocol to facilitate distribution of -images to the docker engine. It interacts with instances of the docker -registry, which is a service to manage information about docker images and -enable their distribution. 
The specification covers the operation of version 2 -of this API, known as _Docker Registry HTTP API V2_. - -While the V1 registry protocol is usable, there are several problems with the -architecture that have led to this new version. The main driver of this -specification these changes to the docker the image format, covered in -[docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image -definition and improves security. This specification will build on that work, -leveraging new properties of the manifest format to improve performance, -reduce bandwidth usage and decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. - -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occured. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. 
When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B. -When process B attempts to upload the layer, the registry indicates that its -not necessary because the layer is already known. - -If process A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. - -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. - -
-<dl>
-  <dt>j</dt>
-  <dd>
-    <ul>
-      <li>Add ability to mount blobs across repositories.</li>
-    </ul>
-  </dd>
-
-  <dt>i</dt>
-  <dd>
-    <ul>
-      <li>Clarified expected response behavior to manifest HEAD request.</li>
-    </ul>
-  </dd>
-
-  <dt>h</dt>
-  <dd>
-    <ul>
-      <li>All mention of tarsum removed.</li>
-    </ul>
-  </dd>
-
-  <dt>g</dt>
-  <dd>
-    <ul>
-      <li>Clarified pagination behavior with unspecified parameters.</li>
-    </ul>
-  </dd>
-
-  <dt>f</dt>
-  <dd>
-    <ul>
-      <li>Specify the delete API for layers and manifests.</li>
-    </ul>
-  </dd>
-
-  <dt>e</dt>
-  <dd>
-    <ul>
-      <li>Added support for listing registry contents.</li>
-      <li>Added pagination to tags API.</li>
-      <li>Added common approach to support pagination.</li>
-    </ul>
-  </dd>
-
-  <dt>d</dt>
-  <dd>
-    <ul>
-      <li>Allow repository name components to be one character.</li>
-      <li>Clarified that single component names are allowed.</li>
-    </ul>
-  </dd>
-
-  <dt>c</dt>
-  <dd>
-    <ul>
-      <li>Added section covering digest format.</li>
-      <li>Added more clarification that manifest cannot be deleted by tag.</li>
-    </ul>
-  </dd>
-
-  <dt>b</dt>
-  <dd>
-    <ul>
-      <li>Added capability of doing streaming upload to PATCH blob upload.</li>
-      <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
-      <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
-    </ul>
-  </dd>
-
-  <dt>a</dt>
-  <dd>
-    <ul>
-      <li>Added support for immutable manifest references in manifest endpoints.</li>
-      <li>Deleting a manifest by tag has been deprecated.</li>
-      <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
-      <li>Added error code for unsupported operations.</li>
-    </ul>
-  </dd>
-</dl>
- -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least one lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. -2. If a repository name has two or more path components, they must be - separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
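By way of illustration, a minimal Go sketch of this version check against a hypothetical registry endpoint, with credentials and TLS configuration omitted:

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical registry base URL.
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK: // V2 supported; any response body can be ignored
		fmt.Println("API version:", resp.Header.Get("Docker-Distribution-API-Version"))
	case http.StatusUnauthorized: // act on the challenge, then retry the check
		fmt.Println("challenge:", resp.Header.Get("WWW-Authenticate"))
	default: // 404 or anything unexpected: assume V2 is not implemented
		fmt.Println("registry does not implement V2:", resp.Status)
	}
}
```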
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fallback to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: -``` -digest := algorithm ":" hex -algorithm := /[A-Fa-f0-9_+.-]+/ -hex := /[A-Fa-f0-9]+/ -``` - -Some examples of _digests_ include the following: - -digest | description | -----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring `C` passed into a function, `SHA256`, that returns a -bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated -with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` -as equal to `D`. A digest can be verified by independently calculating `D` and -comparing it with identifier `ID(C)`. - -#### Digest Header - -To provide verification of http content, any response may include a `Docker- -Content-Digest` header. This will include the digest of the target entity -returned in the response. For blobs, this is the entire blob content. For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. 
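The pseudo-code above translates directly into Go. A minimal sketch of calculating a digest and applying the verification rule from this section, with the fetched bytes standing in for content retrieved over HTTP:

```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digest implements ID(C) = "sha256:" + EncodeHex(SHA256(C)) from the
// pseudo-code above.
func digest(content []byte) string {
	sum := sha256.Sum256(content)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	fetched := []byte("a small string")
	requested := digest(fetched) // in practice, the digest used to fetch the content

	// Always verify against the digest used to fetch the content, never only
	// against the Docker-Content-Digest header returned by the server.
	if digest(fetched) != requested {
		panic("content does not match the digest it was fetched by")
	}
	fmt.Println("verified", requested)
}
```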
This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header `Docker- -> Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including digest) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -digests to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -##### Existing Manifests - -The image manifest can be checked for existence with the following url: - -``` -HEAD /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful the response will -be as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `digest`. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. 
Responses to this request are covered below. - -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the digest specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -http specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the digests will be guaranteed to match. - -##### Uploading the Layer - -If the POST request is successful, a `202 Accepted` response will be returned -with the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The rest of the upload process can be carried out with the returned url, -called the "Upload URL" from the `Location` header. All responses to the -upload url, whether sending data or getting status, will be in this format. -Though the URI format (`/v2//blobs/uploads/`) for the `Location` -header is specified, clients should treat it as an opaque url and should never -try to assemble the it. While the `uuid` parameter may be an actual UUID, this -proposal imposes no constraints on the format and clients should never impose -any. - -If clients need to correlate local upload state with remote upload state, the -contents of the `Docker-Upload-UUID` header should be used. Such an id can be -used to key the last used location header when implementing resumable uploads. - -##### Upload Progress - -The progress and chunk coordination of the upload process will be coordinated -through the `Range` header. While this is a non-standard use of the `Range` -header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. -For an upload that just started, for an example with a 1000 byte layer file, -the `Range` header would be as follows: - -``` -Range: bytes=0-0 -``` - -To get the status of an upload, issue a GET request to the upload URL: - -``` -GET /v2//blobs/uploads/ -Host: -``` - -The response will be similar to the above, except will return 204 status: - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: bytes=0- -Docker-Upload-UUID: -``` - -Note that the HTTP `Range` header byte ranges are inclusive and that will be -honored, even in non-standard use cases. - -##### Monolithic Upload - -A monolithic upload is simply a chunked upload with a single chunk and may be -favored by clients that would like to avoided the complexity of chunking. To -carry out a "monolithic" upload, one can simply put the entire content blob to -the provided URL: - -``` -PUT /v2//blobs/uploads/?digest= -Content-Length: -Content-Type: application/octet-stream - - -``` - -The "digest" parameter must be included with the PUT request. Please see the -_Completed Upload_ section for details on the parameters and expected -responses. 
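Under the assumptions of this section, a monolithic push might be sketched in Go as follows. The registry and repository names are placeholders, authentication is omitted, and both the `201` and `204` completion statuses that appear in this document are accepted:

```
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
)

// pushBlob starts an upload with POST, then completes it with a single PUT
// carrying the whole blob and the digest parameter.
func pushBlob(registry, repo string, blob []byte) error {
	start, err := http.NewRequest(http.MethodPost, registry+"/v2/"+repo+"/blobs/uploads/", nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(start)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("starting upload: %s", resp.Status)
	}

	// Treat the upload URL as opaque; only append the digest parameter.
	loc, err := resp.Location()
	if err != nil {
		return err
	}
	sum := sha256.Sum256(blob)
	q := loc.Query()
	q.Set("digest", "sha256:"+hex.EncodeToString(sum[:]))
	loc.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, loc.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	put, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	put.Body.Close()
	if put.StatusCode != http.StatusCreated && put.StatusCode != http.StatusNoContent {
		return fmt.Errorf("completing upload: %s", put.Status)
	}
	return nil
}

func main() {
	fmt.Println(pushBlob("https://registry.example.com", "myorg/app", []byte("layer bytes")))
}
```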
- -Additionally, the upload can be completed with a single `POST` request to -the uploads endpoint, including the "size" and "digest" parameters: - -``` -POST /v2//blobs/uploads/?digest= -Content-Length: -Content-Type: application/octet-stream - - -``` - -On the registry service, this should allocate a download, accept and verify -the data and return the same response as the final chunk of an upload. If the -POST request fails collecting the data in any way, the registry should attempt -to return an error response to the client with the `Location` header providing -a place to continue the download. - -The single `POST` method is provided for convenience and most clients should -implement `POST` + `PUT` to support reliable resume of uploads. - -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the upload will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blob/uploads/?digest= -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. - -###### Digest Parameter - -The "digest" parameter is designed as an opaque parameter to support -verification of a successful transfer. 
For example, a HTTP URI parameter -might be as follows: - -``` -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Given this parameter, the registry will verify that the provided content does -match this digest. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. -The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Cross Repository Blob Mount - -A blob may be mounted from another repository that the client has read access -to, removing the need to upload a blob already known to the registry. To issue -a blob mount instead of an upload, a POST request should be issued in the -following format: - -``` -POST /v2//blobs/uploads/?mount=&from= -Content-Length: 0 -``` - -If the blob is successfully mounted, the client will receive a `201 Created` -response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. - -If a mount fails due to invalid repository or digest arguments, the registry -will fall back to the standard upload behavior and return a `202 Accepted` with -the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -This behavior is consistent with older versions of the registry, which do not -recognize the repository mount query parameters. - -Note: a client may issue a HEAD request to check existence of a blob in a source -repository to distinguish between the registry not supporting blob mounts and -the blob not existing in the expected repository. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -### Deleting a Layer - -A layer may be deleted from the registry via its `name` and `digest`. A -delete may be issued with the following request format: - - DELETE /v2//blobs/ - -If the blob exists and has been successfully deleted, the following response -will be issued: - - 202 Accepted - Content-Length: None - -If the blob had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -If a layer is deleted which is referenced by a manifest in the registry, -then the complete images will not be resolvable. 
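A minimal Go sketch of the delete request just described, where the repository name and digest are placeholders and authentication is omitted:

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical repository and digest.
	u := "https://registry.example.com/v2/myorg/app/blobs/" +
		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"

	req, err := http.NewRequest(http.MethodDelete, u, nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusAccepted: // 202: the blob was deleted
		fmt.Println("deleted")
	case http.StatusNotFound: // 404: already deleted or never present
		fmt.Println("blob not found")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
```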
#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the
image manifest. An image can be pushed using the following request format:

    PUT /v2/<name>/manifests/<reference>

    {
       "name": <name>,
       "tag": <tag>,
       "fsLayers": [
          {
             "blobSum": <digest>
          },
          ...
       ],
       "history": <v1 images>,
       "signature": <JWS>,
       ...
    }

The `name` and `reference` fields of the request body must match those specified in
the URL. The `reference` field may be a "tag" or a "digest".

If there is a problem with pushing the manifest, a relevant 4xx response will
be returned with a JSON error message. Please see the _PUT Manifest_ section
for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
returned. The `detail` field of the error response will have a `digest` field
identifying the missing blob. An error is returned for each unknown blob. The
response format is as follows:

    {
        "errors": [{
                "code": "BLOB_UNKNOWN",
                "message": "blob unknown to registry",
                "detail": {
                    "digest": <digest>
                }
            },
            ...
        ]
    }

### Listing Repositories

Images are stored in collections known as _repositories_, each keyed by a
`name`, as seen throughout the API specification. A registry instance may
contain several repositories. The list of available repositories is made
available through the _catalog_.

The catalog for a given registry can be retrieved with the following request:

```
GET /v2/_catalog
```

The response will be in the following format:

```
200 OK
Content-Type: application/json

{
  "repositories": [
    <name>,
    ...
  ]
}
```

Note that the contents of the response are specific to the registry
implementation. Some registries may opt to provide a full catalog output,
limit it based on the user's access level or omit upstream results, if
providing mirroring functionality. Subsequently, the presence of a repository
in the catalog listing only means that the registry *may* provide access to
the repository at the time of the request. Conversely, a missing entry does
*not* mean that the registry does not have the repository. More succinctly,
the catalog can confirm that a repository may be accessible, but its absence
cannot confirm that the repository does not exist.

For registries with a large number of repositories, this response may be quite
large. If such a response is expected, one should use pagination. A registry
may also limit the number of results returned even if pagination was not
explicitly requested. In this case the `Link` header will be returned along
with the results, and subsequent results can be obtained by following the link
as if pagination had been initially requested.

For details of the `Link` header, please see the _Pagination_ section.

#### Pagination

Paginated catalog results can be retrieved by adding an `n` parameter to the
request URL, declaring that the response should be limited to `n` results.
Starting a paginated flow begins as follows:

```
GET /v2/_catalog?n=<integer>
```

The above specifies that a catalog response should be returned, from the start of
the result set, ordered lexically, limiting the number of results to `n`. The
response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"

{
  "repositories": [
    <name>,
    ...
  ]
}
```

The above includes the _first_ `n` entries from the result set.
To get the
_next_ `n` entries, one can create a URL where the argument `last` has the
value from `repositories[len(repositories)-1]`. If there are indeed more
results, the URL for the next block is encoded in an
[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
relation. The presence of the `Link` header communicates to the client that
the entire result set has not been returned and another request must be
issued. If the header is not present, the client can assume that all results
have been received.

> __NOTE:__ In the request template above, note that the brackets
> are required. For example, if the url is
> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.

Compliant client implementations should always use the `Link` header
value when proceeding through results linearly. The client may construct URLs
to skip forward in the catalog.

To get the next result set, a client would issue the request as follows, using
the URL encoded in the described `Link` header:

```
GET /v2/_catalog?n=<n from the request>&last=<last repository in response>
```

The above process should then be repeated until the `Link` header is no longer
set.

The catalog result set is represented abstractly as a lexically sorted list,
where the position in that list can be specified by the query term `last`. The
entries in the response start _after_ the term specified by `last`, up to `n`
entries.

The behavior of `last` is quite simple when demonstrated with an example. Let
us say the registry has the following repositories:

```
a
b
c
d
```

If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
The `Link` header returned on the response will have `n` set to 2 and last set
to _b_:

```
Link: <?n=2&last=b>; rel="next"
```

The client can then issue the request with the above value from the `Link`
header, receiving the values _c_ and _d_. Note that `n` may change on the
second-to-last response, or be omitted entirely, if the server so chooses.
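Put together, a client walking the catalog might look like the following
sketch. The `Link` parsing here is deliberately simplified, assuming a single
"next" relation with a relative URL as in the examples above; a production
client would use a full RFC 5988 parser:

```go
package main

import (
	"encoding/json"
	"net/http"
	"strings"
)

// listRepositories pages through /v2/_catalog by following Link headers.
func listRepositories(registry string) ([]string, error) {
	var all []string
	next := registry + "/v2/_catalog?n=100"
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		link := resp.Header.Get("Link")
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Repositories...)

		// A header such as `Link: </v2/_catalog?n=100&last=b>; rel="next"`
		// means more results remain; its absence means we are done.
		next = ""
		if start, end := strings.Index(link, "<"), strings.Index(link, ">"); start >= 0 && end > start {
			next = registry + link[start+1:end]
		}
	}
	return all, nil
}
```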
### Listing Image Tags

It may be necessary to list all of the tags under a given repository. The tags
for an image repository can be retrieved with the following request:

    GET /v2/<name>/tags/list

The response will be in the following format:

    200 OK
    Content-Type: application/json

    {
        "name": <name>,
        "tags": [
            <tag>,
            ...
        ]
    }

For repositories with a large number of tags, this response may be quite
large. If such a response is expected, one should use pagination.

#### Pagination

Paginated tag results can be retrieved by adding the appropriate parameters to
the request URL described above. The behavior of tag pagination is identical
to that specified for catalog pagination. We cover a simple flow to highlight
any differences.

Starting a paginated flow may begin as follows:

```
GET /v2/<name>/tags/list?n=<integer>
```

The above specifies that a tags response should be returned, from the start of
the result set, ordered lexically, limiting the number of results to `n`. The
response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"

{
  "name": <name>,
  "tags": [
    <tag>,
    ...
  ]
}
```

To get the next result set, a client would issue the request as follows, using
the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
header:

```
GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
```

The above process should then be repeated until the `Link` header is no longer
set in the response. The behavior of the `last` parameter, the provided
response result, lexical ordering and encoding of the `Link` header are
identical to that of catalog pagination.

### Deleting an Image

An image may be deleted from the registry via its `name` and `reference`. A
delete may be issued with the following request format:

    DELETE /v2/<name>/manifests/<reference>

For deletes, `reference` *must* be a digest or the delete will fail. If the
image exists and has been successfully deleted, the following response will be
issued:

    202 Accepted
    Content-Length: None

If the image had already been deleted or did not exist, a `404 Not Found`
response will be issued instead.

## Detail

> **Note**: This section is still under construction. For the purposes of
> implementation, if any details below differ from the described request flows
> above, the section below should be corrected. When they match, this note
> should be removed.

The behavior of each endpoint is covered in detail in this section, organized
by route and entity. All aspects of the requests and responses are covered,
including headers, parameters and body formats. Examples of requests and their
corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example
> request, a description of the request, followed by information about that
> request.

The list of methods and URIs is covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
{{end}}{{end}}

The detail for each endpoint is covered in the following sections.
- -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} -{{end}} - -{{range $route := .RouteDescriptors}} -### {{.Entity}} - -{{.Description}} - -{{range $method := $route.Methods}} - -#### {{.Method}} {{$route.Entity}} - -{{.Description}} - -{{if .Requests}}{{range .Requests}}{{if .Name}} -##### {{.Name}}{{end}} - -``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if or .Headers .PathParameters .QueryParameters}} -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| -{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| -{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| -{{end}}{{end}} - -{{if .Successes}} -{{range .Successes}} -###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Fields}}The following fields may be returned in the response body: - -|Name|Description| -|----|-----------| -{{range .Fields}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{if .Headers}} -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{end}}{{end}} - -{{if .Failures}} -{{range .Failures}} -###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Headers}} -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}} - -{{if .ErrorCodes}} -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | -{{end}} - -{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} - -{{end}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md deleted file mode 100644 index b0d31256..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md +++ /dev/null @@ -1,14 +0,0 @@ - - -# Docker Registry v2 authentication - -See the [Token Authentication Specification](token.md) and -[Token Authentication Implementation](jwt.md) for more information. 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md deleted file mode 100644 index f627b17a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md +++ /dev/null @@ -1,324 +0,0 @@ - - -# Docker Registry v2 Bearer token specification - -This specification covers the `docker/distribution` implementation of the -v2 Registry's authentication schema. Specifically, it describes the JSON -Web Token schema that `docker/distribution` has adopted to implement the -client-opaque Bearer token issued by an authentication service and -understood by the registry. - -This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) - -## Getting a Bearer Token - -For this example, the client makes an HTTP GET request to the following URL: - -``` -https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push -``` - -The token server should first attempt to authenticate the client using any -authentication credentials provided with the request. As of Docker 1.8, the -registry client in the Docker Engine only supports Basic Authentication to -these token servers. If an attempt to authenticate to the token server fails, -the token server should return a `401 Unauthorized` response indicating that -the provided credentials are invalid. - -Whether the token server requires authentication is up to the policy of that -access control provider. Some requests may require authentication to determine -access (such as pushing or pulling a private repository) while others may not -(such as pulling from a public repository). - -After authenticating the client (which may simply be an anonymous client if -no attempt was made to authenticate), the token server must next query its -access control list to determine whether the client has the requested scope. In -this example request, if I have authenticated as user `jlhawn`, the token -server will determine what access I have to the repository `samalba/my-app` -hosted by the entity `registry.docker.io`. - -Once the token server has determined what access the client has to the -resources requested in the `scope` parameter, it will take the intersection of -the set of requested actions on each resource and the set of actions that the -client has in fact been granted. If the client only has a subset of the -requested access **it must not be considered an error** as it is not the -responsibility of the token server to indicate authorization errors as part of -this workflow. - -Continuing with the example request, the token server will find that the -client's set of granted access to the repository is `[pull, push]` which when -intersected with the requested access `[pull, push]` yields an equal set. If -the granted access set was found only to be `[pull]` then the intersected set -would only be `[pull]`. If the client has no access to the repository then the -intersected set would be empty, `[]`. - -It is this intersected set of access which is placed in the returned token. - -The server will now construct a JSON Web Token to sign and return. A JSON Web -Token has 3 main parts: - -1. Headers - - The header of a JSON Web Token is a standard JOSE header. The "typ" field - will be "JWT" and it will also contain the "alg" which identifies the - signing algorithm used to produce the signature. 
It will also usually have
   a "kid" field, the ID of the key which was used to sign the token.

   Here is an example JOSE Header for a JSON Web Token (formatted with
   whitespace for readability):

   ```
   {
       "typ": "JWT",
       "alg": "ES256",
       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
   }
   ```

   It specifies that this object is a JSON Web Token, signed with the key of
   the given ID, using the Elliptic Curve signature algorithm with a SHA-256
   hash.

2. Claim Set

   The Claim Set is a JSON struct containing these standard registered claim
   name fields:
   - `iss` (Issuer): The issuer of the token, typically the fqdn of the
     authorization server.
   - `sub` (Subject): The subject of the token; the name or id of the client
     which requested it. This should be empty (`""`) if the client did not
     authenticate.
   - `aud` (Audience): The intended audience of the token; the name or id of
     the service which will verify the token to authorize the client/subject.
   - `exp` (Expiration): The token should only be considered valid up to this
     specified date and time.
   - `nbf` (Not Before): The token should not be considered valid before this
     specified date and time.
   - `iat` (Issued At): Specifies the date and time at which the authorization
     server generated this token.
   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
     intended audience to prevent replays of the token.

   The Claim Set will also contain a private claim name unique to this
   authorization server specification:

   - `access`: An array of access entry objects with the following fields:
     - `type`: The type of resource hosted by the service.
     - `name`: The name of the resource of the given type hosted by the
       service.
     - `actions`: An array of strings which give the actions authorized on
       this resource.
   Here is an example of such a JWT Claim Set (formatted with whitespace for
   readability):

   ```
   {
       "iss": "auth.docker.com",
       "sub": "jlhawn",
       "aud": "registry.docker.com",
       "exp": 1415387315,
       "nbf": 1415387015,
       "iat": 1415387015,
       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
       "access": [
           {
               "type": "repository",
               "name": "samalba/my-app",
               "actions": [
                   "pull",
                   "push"
               ]
           }
       ]
   }
   ```

3. Signature

   The authorization server will produce a JOSE header and Claim Set with no
   extraneous whitespace, i.e., the JOSE Header from above would be

   ```
   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
   ```

   and the Claim Set from above would be

   ```
   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]}
   ```

   The UTF-8 representations of this JOSE header and Claim Set are then
   URL-safe base64 encoded (without trailing '=' padding), producing:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
   ```

   for the JOSE Header and

   ```
   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   for the Claim Set. These two are concatenated using a '.' character,
   yielding the string:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   This is then used as the payload to the `ES256` signature algorithm
   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).

   This example signature will use the following ECDSA key for the server:

   ```
   {
       "kty": "EC",
       "crv": "P-256",
       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
   }
   ```

   A resulting signature of the above payload using this key is:

   ```
   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

   Concatenating all of these together with a `.` character gives the
   resulting JWT:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

This can now be placed in an HTTP response and returned to the client, which
then uses it to authenticate to the audience service:

```
HTTP/1.1 200 OK
Content-Type: application/json

{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"}
```

## Using the signed token

Once the client has a token, it will try the registry request again with the
token placed in the HTTP `Authorization` header like so:

```
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
```

This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1).

## Verifying the token

The registry must now verify the token presented by the user by inspecting the
claim set within. The registry will:

- Ensure that the issuer (`iss` claim) is an authority it trusts.
- Ensure that the registry identifies itself as the audience (`aud` claim).
- Check that the current time is between the `nbf` and `exp` claim times.
- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has
  not been seen before.
  - To enforce this, the registry may keep a record of `jti`s it has seen for
    up to the `exp` time of the token to prevent token replays.
- Check the `access` claim value and use the identified resources and the list
  of actions authorized to determine whether the token grants the required
  level of access for the operation the client is attempting to perform.
- Verify that the signature of the token is valid.

If any of these requirements are not met, the registry will return a
`403 Forbidden` response to indicate that the token is invalid.
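A sketch of these claim checks in Go, using only the standard library. The
`trustedIssuer` and `service` parameters are illustrative, and signature
verification against the trusted public keys is noted but elided:

```go
package token

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

type claims struct {
	Issuer    string `json:"iss"`
	Audience  string `json:"aud"`
	NotBefore int64  `json:"nbf"`
	Expires   int64  `json:"exp"`
	Access    []struct {
		Type    string   `json:"type"`
		Name    string   `json:"name"`
		Actions []string `json:"actions"`
	} `json:"access"`
}

// checkClaims decodes the JWT payload and applies the iss/aud/nbf/exp checks
// described above. It does NOT verify the signature; a real registry must
// verify parts[0]+"."+parts[1] against a trusted key before using Access.
func checkClaims(token, trustedIssuer, service string) (*claims, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return nil, fmt.Errorf("malformed JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	var c claims
	if err := json.Unmarshal(payload, &c); err != nil {
		return nil, err
	}
	if c.Issuer != trustedIssuer {
		return nil, fmt.Errorf("untrusted issuer %q", c.Issuer)
	}
	if c.Audience != service {
		return nil, fmt.Errorf("token not intended for this registry")
	}
	if now := time.Now().Unix(); now < c.NotBefore || now > c.Expires {
		return nil, fmt.Errorf("token not currently valid")
	}
	return &c, nil
}
```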
**Note**: it is only at this point in the workflow that an authorization error
may occur. The token server should *not* return errors when the user does not
have the requested authorization. Instead, the returned token should indicate
whatever of the requested scope the client does have (the intersection of
requested and granted access). If the token does not supply proper
authorization then the registry will return the appropriate error.

At no point in this process should the registry need to call back to the
authorization server. The registry only needs to be supplied with the trusted
public keys to verify the token signatures.

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
deleted file mode 100644
index 61e893c0..00000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md
+++ /dev/null
@@ -1,220 +0,0 @@

# Docker Registry v2 authentication via central service

This document outlines the v2 Docker registry authentication scheme:

![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360)

1. Attempt to begin a push/pull operation with the registry.
2. If the registry requires authorization it will return a `401 Unauthorized`
   HTTP response with information on how to authenticate.
3. The registry client makes a request to the authorization service for a
   Bearer token.
4. The authorization service returns an opaque Bearer token representing the
   client's authorized access.
5. The client retries the original request with the Bearer token embedded in
   the request's Authorization header.
6. The Registry authorizes the client by validating the Bearer token and the
   claim set embedded within it and begins the push/pull session as usual.

## Requirements

- Registry clients which can understand and respond to token auth challenges
  returned by the resource server.
- An authorization server capable of managing access controls for the
  resources hosted by any given service (such as repositories in a Docker
  Registry).
- A Docker Registry capable of trusting the authorization server to sign
  tokens which clients can use for authorization, and able to verify these
  tokens for single use or for use during a sufficiently short period of time.

## Authorization Server Endpoint Descriptions

The described server is meant to serve as a standalone access control manager
for resources hosted by other services which wish to authenticate and manage
authorizations using a separate access control manager.

A service like this is used by the official Docker Registry to authenticate
clients and verify their authorization to Docker image repositories.

As of Docker 1.6, the registry client within the Docker Engine has been updated
to handle such an authorization workflow.

## How to authenticate

Registry V1 clients first contact the index to initiate a push or pull. Under
the Registry V2 workflow, clients should contact the registry first. If the
registry server requires authentication it will return a `401 Unauthorized`
response with a `WWW-Authenticate` header detailing how to authenticate to this
registry.

For example, say I (username `jlhawn`) am attempting to push an image to the
repository `samalba/my-app`. For the registry to authorize this, I will need
`push` access to the `samalba/my-app` repository.
The registry will first
return this response:

```
HTTP/1.1 401 Unauthorized
Content-Type: application/json; charset=utf-8
Docker-Distribution-Api-Version: registry/2.0
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
Date: Thu, 10 Sep 2015 19:32:31 GMT
Content-Length: 235
Strict-Transport-Security: max-age=31536000

{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]}
```

Note the HTTP response header indicating the auth challenge:

```
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
```

This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3).

This challenge indicates that the registry requires a token issued by the
specified token server and that the request the client is attempting will
need to include sufficient access entries in its claim set. To respond to this
challenge, the client will need to make a `GET` request to the URL
`https://auth.docker.io/token` using the `service` and `scope` values from the
`WWW-Authenticate` header.
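A rough sketch of extracting `realm`, `service` and `scope` from this
challenge in Go. It assumes every parameter value is quoted, as in the example
above, rather than implementing the full `WWW-Authenticate` grammar, and query
escaping of the final URL is elided:

```go
package main

import (
	"fmt"
	"strings"
)

// parseBearerChallenge splits a Bearer challenge into its parameters.
// Splitting on `",` (quote-comma) keeps commas inside quoted values, such
// as scope="...:pull,push", intact.
func parseBearerChallenge(header string) map[string]string {
	params := map[string]string{}
	header = strings.TrimPrefix(header, "Bearer ")
	for _, kv := range strings.Split(header, `",`) {
		parts := strings.SplitN(kv, `="`, 2)
		if len(parts) != 2 {
			continue
		}
		params[strings.TrimSpace(parts[0])] = strings.TrimSuffix(parts[1], `"`)
	}
	return params
}

func main() {
	h := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"`
	p := parseBearerChallenge(h)
	// The token request is then a GET against the realm with these values.
	fmt.Printf("%s?service=%s&scope=%s\n", p["realm"], p["service"], p["scope"])
}
```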
## Requesting a Token

#### Query Parameters

- `service`: The name of the service which hosts the resource.
- `scope`: The resource in question, formatted as one of the space-delimited
  entries from the `scope` parameters from the `WWW-Authenticate` header
  shown above. This query parameter should be specified multiple times if
  there is more than one `scope` entry from the `WWW-Authenticate` header.
  The above example would be specified as:
  `scope=repository:samalba/my-app:push`.

#### Token Response Fields

- `token`: An opaque Bearer token that clients should supply to subsequent
  requests in the `Authorization` header.
- `access_token`: For compatibility with OAuth 2.0, the token is also
  accepted under the name `access_token`. At least one of these fields must
  be specified, but both may also appear (for compatibility with older
  clients). When both are specified, they should be equivalent; if they
  differ, the client's choice is undefined.
- `expires_in`: (Optional) The duration in seconds since the token was issued
  that it will remain valid. When omitted, this defaults to 60 seconds. For
  compatibility with older clients, a token should never be returned with
  less than 60 seconds to live.
- `issued_at`: (Optional) The RFC 3339-serialized UTC standard time at which
  a given token was issued. If `issued_at` is omitted, the expiration is
  measured from when the token exchange completed.
- -#### Example - -For this example, the client makes an HTTP GET request to the following URL: - -``` -https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push -``` - -The token server should first attempt to authenticate the client using any -authentication credentials provided with the request. As of Docker 1.8, the -registry client in the Docker Engine only supports Basic Authentication to -these token servers. If an attempt to authenticate to the token server fails, -the token server should return a `401 Unauthorized` response indicating that -the provided credentials are invalid. - -Whether the token server requires authentication is up to the policy of that -access control provider. Some requests may require authentication to determine -access (such as pushing or pulling a private repository) while others may not -(such as pulling from a public repository). - -After authenticating the client (which may simply be an anonymous client if -no attempt was made to authenticate), the token server must next query its -access control list to determine whether the client has the requested scope. In -this example request, if I have authenticated as user `jlhawn`, the token -server will determine what access I have to the repository `samalba/my-app` -hosted by the entity `registry.docker.io`. - -Once the token server has determined what access the client has to the -resources requested in the `scope` parameter, it will take the intersection of -the set of requested actions on each resource and the set of actions that the -client has in fact been granted. If the client only has a subset of the -requested access **it must not be considered an error** as it is not the -responsibility of the token server to indicate authorization errors as part of -this workflow. - -Continuing with the example request, the token server will find that the -client's set of granted access to the repository is `[pull, push]` which when -intersected with the requested access `[pull, push]` yields an equal set. If -the granted access set was found only to be `[pull]` then the intersected set -would only be `[pull]`. If the client has no access to the repository then the -intersected set would be empty, `[]`. - -It is this intersected set of access which is placed in the returned token. 
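A sketch of that access intersection in Go; the requested and granted action
sets would come from the parsed `scope` parameter and the token server's own
access control list, both of which are illustrative here:

```go
package token

// intersect returns the requested actions that were actually granted,
// preserving the order of the requested set.
func intersect(requested, granted []string) []string {
	have := map[string]bool{}
	for _, a := range granted {
		have[a] = true
	}
	var out []string
	for _, a := range requested {
		if have[a] {
			out = append(out, a)
		}
	}
	return out
}

// intersect([]string{"pull", "push"}, []string{"pull"}) == []string{"pull"}
// intersect([]string{"pull", "push"}, nil) == nil (empty set, not an error)
```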
- -The server then constructs an implementation-specific token with this -intersected set of access, and returns it to the Docker client to use to -authenticate to the audience service (within the indicated window of time): - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": "3600","issued_at": "2009-11-10T23:00:00Z"} -``` - - -## Using the Bearer token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md deleted file mode 100644 index ec937b64..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md +++ /dev/null @@ -1,32 +0,0 @@ - - -# Distribution API Implementations - -This is a list of known implementations of the Distribution API spec. - -## [Docker Distribution Registry](https://github.com/docker/distribution) - -Docker distribution is the reference implementation of the distribution API -specification. It aims to fully implement the entire specification. - -### Releases -#### 2.0.1 (_in development_) -Implements API 2.0.1 - -_Known Issues_ - - No resumable push support - - Content ranges ignored - - Blob upload status will always return a starting range of 0 - -#### 2.0.0 -Implements API 2.0.0 - -_Known Issues_ - - No resumable push support - - No PATCH implementation for blob upload - - Content ranges ignored - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md deleted file mode 100644 index a8916dcc..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md +++ /dev/null @@ -1,94 +0,0 @@ - - - - -# Docker Distribution JSON Canonicalization - -To provide consistent content hashing of JSON objects throughout Docker -Distribution APIs, the specification defines a canonical JSON format. Adopting -such a canonicalization also aids in caching JSON responses. 
Note that protocols should not be designed to depend on identical JSON being
generated across different versions or clients. The canonicalization rules are
merely useful for caching and consistency.

## Rules

Compliant JSON should conform to the following rules:

1. All generated JSON should comply with [RFC
   7159](http://www.ietf.org/rfc/rfc7159.txt).
2. Resulting "JSON text" shall always be encoded in UTF-8.
3. Unless a canonical key order is defined for a particular schema, object
   keys shall always appear in lexically sorted order.
4. All whitespace between tokens should be removed.
5. No "trailing commas" are allowed in object or array definitions.
6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e".
   Ampersand "&" is escaped to "\u0026".

## Examples

The following is a simple example of a canonicalized JSON string:

```json
{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
```

## Reference

### Other Canonicalizations

The OLPC project specifies [Canonical
JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
[TUF](http://theupdateframework.com/), which may be used with other
distribution-related protocols, this alternative format has been proposed in
case the original source changes. Specifications complying with either this
specification or an alternative should explicitly call out the
canonicalization format. Except for key ordering, this specification is mostly
compatible.

### Go

In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
will emit canonical JSON by default. Simply using `json.Marshal` will suffice
in most cases:

```go
incoming := map[string]interface{}{
	"asdf": 1,
	"qwer": []interface{}{},
	"zxcv": []interface{}{
		map[string]interface{}{},
		true,
		int(1e9),
		"tyui",
	},
}

canonical, err := json.Marshal(incoming)
if err != nil {
	// ... handle error
}
```

To apply canonical JSON format spacing to an existing serialized JSON buffer,
one can use
[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
with the following arguments:

```go
incoming := getBytes()
var canonical bytes.Buffer
if err := json.Indent(&canonical, incoming, "", ""); err != nil {
	// ... handle error
}
```

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
deleted file mode 100644
index 418bca5d..00000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
+++ /dev/null
@@ -1,163 +0,0 @@

# Image Manifest Version 2, Schema 1

This document outlines the format of the V2 image manifest. The image
manifest described herein was introduced in the Docker daemon in the [v1.3.0
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
It is a provisional manifest to provide compatibility with the [V1 Image
format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the
requirements are defined for the [V2 Schema 2
image](https://github.com/docker/distribution/pull/62).

Image manifests describe the various constituents of a docker image.
Image
manifests can be serialized to JSON format with the following media types:

Manifest Type | Media Type
------------- | -------------
manifest | "application/vnd.docker.distribution.manifest.v1+json"
signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws"

*Note that "application/json" will also be accepted for schema 1.*

References:

 - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015)
 - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093)

## *Manifest* Field Descriptions

Manifest provides the base accessible fields for working with the V2 image
format in the registry.

- **`name`** *string*

  name is the name of the image's repository

- **`tag`** *string*

  tag is the tag of the image

- **`architecture`** *string*

  architecture is the host architecture on which this image is intended to
  run. This is for information purposes and is not currently used by the
  engine.

- **`fsLayers`** *array*

  fsLayers is a list of filesystem layer blob sums contained in this image.

  An fsLayer is a struct consisting of the following fields:

  - **`blobSum`** *digest.Digest*

    blobSum is the digest of the referenced filesystem image layer. A
    digest must be a sha256 hash.

- **`history`** *array*

  history is a list of unstructured historical data for v1 compatibility. It
  contains the ID of the image layer and the IDs of the layer's parent layers.

  history is a struct consisting of the following fields:

  - **`v1Compatibility`** *string*

    V1Compatibility is the raw V1 compatibility information. This will
    contain the JSON object describing the V1 of this image.

- **`schemaVersion`** *int*

  SchemaVersion is the image manifest schema that this image follows.

> **Note**: the length of `history` must be equal to the length of `fsLayers`,
> and entries in each are correlated by index.
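The field descriptions above map naturally onto Go types. A sketch of how a
client might model the manifest follows; the type names are hypothetical, not
the types used by `docker/distribution` itself:

```go
package manifest

// FSLayer references one filesystem layer blob by its digest.
type FSLayer struct {
	BlobSum string `json:"blobSum"` // sha256 digest of the layer blob
}

// History carries the raw V1 compatibility JSON for one layer.
type History struct {
	V1Compatibility string `json:"v1Compatibility"` // raw V1 image JSON
}

// Manifest models the schema 1 fields described above. The history and
// fsLayers slices must be the same length, correlated by index.
type Manifest struct {
	SchemaVersion int       `json:"schemaVersion"`
	Name          string    `json:"name"`
	Tag           string    `json:"tag"`
	Architecture  string    `json:"architecture"`
	FSLayers      []FSLayer `json:"fsLayers"`
	History       []History `json:"history"`
}
```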
## Signed Manifests

A signed manifest provides an envelope for a signed image manifest: an image
manifest along with an additional field containing the signature of the
manifest.

The docker client can verify signed manifests and display a message to the user.

### Signing Manifests

Image manifests can be signed in two different ways: with a *libtrust* private
key or an x509 certificate chain. When signing with an x509 certificate chain,
the public key of the first element in the chain must be the public key
corresponding to the signing key.

### Signed Manifest Field Description

Signed manifests include an image manifest and a list of signatures generated
by *libtrust*. A signature consists of the following fields:

- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)*

  A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html)

- **`signature`** *string*

  A signature for the image manifest, signed by a *libtrust* private key

- **`protected`** *string*

  The signed protected header

## Example Manifest

*Example showing the official 'hello-world' image manifest.*

```
{
   "name": "hello-world",
   "tag": "latest",
   "architecture": "amd64",
   "fsLayers": [
      {
         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
      },
      {
         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
      },
      {
         "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"
      },
      {
         "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
      }
   ],
   "history": [
      {
         "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
      },
      {
         "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
      }
   ],
   "schemaVersion": 1,
   "signatures": [
      {
         "header": {
            "jwk": {
               "crv": "P-256",
               "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4",
               "kty": "EC",
               "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A",
               "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010"
            },
            "alg": "ES256"
         },
         "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg",
         "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ"
      }
   ]
}
```

diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md
deleted file mode 100644
index 7449e580..00000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-2.md
+++ /dev/null
@@ -1,273 +0,0 @@

# Image Manifest Version 2, Schema 2

This document outlines the format of the V2 image manifest, schema version 2.
The original (and provisional) image manifest for V2 (schema 1) was introduced
in the Docker daemon in the [v1.3.0
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453)
and is specified in the [schema 1 manifest definition](./manifest-v2-1.md).

This second schema version has two primary goals. The first is to allow
multi-architecture images, through a "fat manifest" which references image
manifests for platform-specific versions of an image. The second is to
move the Docker engine towards content-addressable images, by supporting
an image model where the image's configuration can be hashed to generate
an ID for the image.

# Media Types

The following media types are used by the manifest formats described here, and
the resources they reference:

- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format)
- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2)
- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest"
- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar
- `application/vnd.docker.container.image.v1+json`: Container config JSON

## Manifest List

The manifest list is the "fat manifest" which points to specific image manifests
for one or more platforms. Its use is optional, and relatively few images will
use one of these manifests.
A client will distinguish a manifest list from an
image manifest based on the Content-Type returned in the HTTP response.

## *Manifest List* Field Descriptions

- **`schemaVersion`** *int*

  This field specifies the image manifest schema version as an integer. This
  schema uses the version `2`.

- **`mediaType`** *string*

  The MIME type of the manifest list. This should be set to
  `application/vnd.docker.distribution.manifest.list.v2+json`.

- **`manifests`** *array*

  The manifests field contains a list of manifests for specific platforms.

  Fields of an object in the manifests list are:

  - **`mediaType`** *string*

    The MIME type of the referenced object. This will generally be
    `application/vnd.docker.image.manifest.v2+json`, but it could also
    be `application/vnd.docker.image.manifest.v1+json` if the manifest
    list references a legacy schema-1 manifest.

  - **`size`** *int*

    The size in bytes of the object. This field exists so that a client
    will have an expected size for the content before validating. If the
    length of the retrieved content does not match the specified length,
    the content should not be trusted.

  - **`digest`** *string*

    The digest of the content, as defined by the
    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).

  - **`platform`** *object*

    The platform object describes the platform which the image in the
    manifest runs on.

    - **`architecture`** *string*

      The architecture field specifies the CPU architecture, for example
      `amd64` or `ppc64`.

    - **`os`** *string*

      The os field specifies the operating system, for example
      `linux` or `windows`.

    - **`variant`** *string*

      The optional variant field specifies a variant of the CPU, for
      example `ppc64le` to specify a little-endian version of a PowerPC
      CPU.

    - **`features`** *array*

      The optional features field specifies an array of strings, each
      listing a required CPU feature (for example `sse4` or `aes`).

## Example Manifest List

*Example showing a simple manifest list pointing to image manifests for two platforms:*

```json
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.image.manifest.v2+json",
      "size": 7143,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
      "platform": {
        "architecture": "ppc64",
        "os": "linux",
        "variant": "ppc64le"
      }
    },
    {
      "mediaType": "application/vnd.docker.image.manifest.v2+json",
      "size": 7682,
      "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
      "platform": {
        "architecture": "x86-64",
        "os": "linux",
        "features": [
          "sse4"
        ]
      }
    }
  ]
}
```

# Image Manifest

The image manifest provides a configuration and a set of layers for a container
image. It's the direct replacement for the schema-1 manifest.

## *Image Manifest* Field Descriptions

- **`schemaVersion`** *int*

  This field specifies the image manifest schema version as an integer. This
  schema uses version `2`.

- **`mediaType`** *string*

  The MIME type of the manifest. This should be set to
  `application/vnd.docker.distribution.manifest.v2+json`.

- **`config`** *object*

  The config field references a configuration object for a container, by
  digest. This configuration item is a JSON blob that the runtime uses
  to set up the container.
This new schema uses a tweaked version
  of this configuration to allow image content-addressability on the
  daemon side.

  Fields of a config object are:

  - **`mediaType`** *string*

    The MIME type of the referenced object. This should generally be
    `application/vnd.docker.container.image.v1+json`.

  - **`size`** *int*

    The size in bytes of the object. This field exists so that a client
    will have an expected size for the content before validating. If the
    length of the retrieved content does not match the specified length,
    the content should not be trusted.

  - **`digest`** *string*

    The digest of the content, as defined by the
    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).

- **`layers`** *array*

  The layer list is ordered starting from the base image (opposite order of schema1).

  Fields of an item in the layers list are:

  - **`mediaType`** *string*

    The MIME type of the referenced object. This should
    generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`.

  - **`size`** *int*

    The size in bytes of the object. This field exists so that a client
    will have an expected size for the content before validating. If the
    length of the retrieved content does not match the specified length,
    the content should not be trusted.

  - **`digest`** *string*

    The digest of the content, as defined by the
    [Registry V2 HTTP API Specification](https://docs.docker.com/registry/spec/api/#digest-parameter).

## Example Image Manifest

*Example showing an image manifest:*

```json
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 7023,
    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 32654,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 16724,
      "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 73109,
      "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
    }
  ]
}
```

# Backward compatibility

The registry will continue to accept uploads of manifests in both the old and
new formats.

When pushing images, clients which support the new manifest format should first
construct a manifest in the new format. If uploading this manifest fails,
presumably because the registry only supports the old format, the client may
fall back to uploading a manifest in the old format.

When pulling images, clients indicate support for this new version of the
manifest format by sending the
`application/vnd.docker.distribution.manifest.v2+json` and
`application/vnd.docker.distribution.manifest.list.v2+json` media types in an
`Accept` header when making a request to the `manifests` endpoint. Updated
clients should check the `Content-Type` header to see whether the manifest
returned from the endpoint is in the old format, or is an image manifest or
manifest list in the new format.
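A sketch of this negotiation from the pulling client's side in Go; the
manifest URL and the handling of each branch are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
)

// fetchManifest advertises both new media types in Accept and dispatches on
// the Content-Type the registry returns.
func fetchManifest(manifestURL string) error {
	req, err := http.NewRequest(http.MethodGet, manifestURL, nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.Header.Get("Content-Type") {
	case "application/vnd.docker.distribution.manifest.list.v2+json":
		fmt.Println("manifest list: pick the entry matching this platform")
	case "application/vnd.docker.distribution.manifest.v2+json":
		fmt.Println("new-format image manifest")
	default:
		fmt.Println("registry fell back to the schema 1 format")
	}
	return nil
}
```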
- -If the manifest being requested uses the new format, and the appropriate media -type is not present in an `Accept` header, the registry will assume that the -client cannot handle the manifest as-is, and rewrite it on the fly into the old -format. If the object that would otherwise be returned is a manifest list, the -registry will look up the appropriate manifest for the x86-64 platform and -linux OS, rewrite that manifest into the old format if necessary, and return -the result to the client. If no suitable manifest is found in the manifest -list, the registry will return a 404 error. - -One of the challenges in rewriting manifests to the old format is that the old -format involves an image configuration for each layer in the manifest, but the -new format only provides one image configuration. To work around this, the -registry will create synthetic image configurations for all layers except the -top layer. These image configurations will not result in runnable images on -their own, but only serve to fill in the parent chain in a compatible way. -The IDs in these synthetic configurations will be derived from hashes of their -respective blobs. The registry will create these configurations and their IDs -using the same scheme as Docker 1.10 when it creates a legacy manifest to push -to a registry which doesn't support the new format. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md deleted file mode 100644 index 2783c427..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md +++ /dev/null @@ -1,76 +0,0 @@ - - - -# Microsoft Azure storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage. - -## Parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Parameter - Required - Description
- accountname - - yes - - Name of the Azure Storage Account. -
- accountkey - - yes - - Primary or Secondary Key for the Storage Account. -
- container - - yes - - Name of the Azure root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api]. -
- realm - - no - - Domain name suffix for the Storage Service API endpoint. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this - is core.windows.net. -
- - -## Related Information - -* To get information about -[azure-blob-storage](http://azure.microsoft.com/en-us/services/storage/), visit -the Microsoft website. -* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container](https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md deleted file mode 100644 index 476edcf5..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md +++ /dev/null @@ -1,18 +0,0 @@ - - - -# Filesystem storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. - -## Parameters - -`rootdirectory`: (optional) The absolute path to a root directory tree in which -to store all registry files. The registry stores all its data here so make sure -there is adequate space available. Defaults to `/var/lib/registry`. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md deleted file mode 100644 index 33cc94b3..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md +++ /dev/null @@ -1,65 +0,0 @@ - - - -# Google Cloud Storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage. - -## Parameters -
Parameter - Required - Description
- bucket - - yes - - Storage bucket name. -
- keyfile - - no - - A private service account key file in JSON format. Instead of a key file Google Application Default Credentials can be used. -
- rootdirectory - - no - - This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. -
- - -`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization). - -`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts). - -**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/index.md deleted file mode 100644 index 2de729ad..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/index.md +++ /dev/null @@ -1,7 +0,0 @@ - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md deleted file mode 100644 index 3109891e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md +++ /dev/null @@ -1,21 +0,0 @@ - - - -# In-memory storage driver (Testing Only) - -For testing purposes only, you can use the `inmemory` storage driver. This -driver is an implementation of the `storagedriver.StorageDriver` interface which -uses local memory for object storage. If you would like to run a registry from -volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk. - -**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production. - -## Parameters - -None diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md deleted file mode 100644 index 2087c98d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md +++ /dev/null @@ -1,124 +0,0 @@ - - -# Aliyun OSS storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage. - -## Parameters -
Parameter - Required - Description
- accesskeyid - -yes - -Your access key ID. -
- accesskeysecret - -yes - -Your access key secret. -
- region - -yes - The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, see the Aliyun OSS documentation. -
- endpoint - -no - -An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value. -
- internal - -no - An internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, see the Aliyun OSS documentation. -
- bucket - -yes - The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). -
- encrypt - -no - Specifies whether you would like your data encrypted on the server side. Defaults to false if not specified. -
- secure - -no - Specifies whether to transfer data to the bucket over SSL or not. If you omit this value, `true` is used. -
- chunksize - -no - The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. -
- rootdirectory - -no - The root directory tree in which to store all registry files. Defaults to an empty string (bucket root). -
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md deleted file mode 100644 index 12643b2a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md +++ /dev/null @@ -1,83 +0,0 @@ - - - -# Ceph RADOS storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses -[Ceph RADOS Object Storage][rados] as a storage backend. - -## Parameters -
Parameter - Required - Description
- poolname - - yes - - Ceph pool name. -
- username - - no - - Ceph cluster user to connect as (i.e. admin, not client.admin). -
- chunksize - - no - - Size of the written RADOS objects. Default value is 4MB (4194304). -
- - -The following parameters must be used to configure the storage driver -(case-sensitive): - -* `poolname`: Name of the Ceph pool -* `username` *optional*: The user to connect as (i.e. admin, not client.admin) -* `chunksize` *optional*: Size of the written RADOS objects. Default value is -4MB (4194304). - -This driver loads the [Ceph client configuration][rados-config] from the -following paths (the first one found is used): - -* `$CEPH_CONF` (environment variable) -* `/etc/ceph/ceph.conf` -* `~/.ceph/config` -* `ceph.conf` (in the current working directory) - -## Developing - -To include this driver when building Docker Distribution, use the build tag -`include_rados`. Please see the [building documentation][building] for details. - -[rados]: http://ceph.com/docs/master/rados/ -[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/ -[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md deleted file mode 100644 index 5b172f9e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md +++ /dev/null @@ -1,197 +0,0 @@ - - - -# S3 storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage. - -## Parameters -
Parameter - Required - Description
- accesskey - - yes - - Your AWS Access Key. -
- secretkey - - yes - - Your AWS Secret Key. -
- region - - yes - - The AWS region in which your bucket exists. For the moment, the Go AWS - library in use does not use the newer DNS based bucket routing. -
- bucket - - yes - - The bucket name in which you want to store the registry's data. -
- encrypt - - no - - Specifies whether the registry stores the image in encrypted format or - not. A boolean value. The default is false. -
- secure - - no - - Indicates whether to use HTTPS instead of HTTP. A boolean value. The - default is true. -
- v4auth - - no - - Indicates whether the registry uses Version 4 of AWS's authentication. - Generally, you should set this to true. By default, this is - false. -
- chunksize - - no - - The S3 API requires multipart upload chunks to be at least 5MB. This value - should be a number that is larger than 5*1024*1024. -
- rootdirectory - - no - - This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. -
- - -`accesskey`: Your AWS access key. - -`secretkey`: Your AWS secret key. - -**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an EC2 instance and will handle authentication with the instance's credentials. - -`region`: The name of the AWS region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html - -`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization. - -`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). - -`secure`: (optional) Whether you would like to transfer data to the bucket over SSL or not. Defaults to true (meaning transferring over SSL) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. - -`v4auth`: (optional) Whether you would like to use AWS Signature Version 4 with your requests. This defaults to false if not specified (note that the eu-central-1 region does not work with Version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false). - -`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5 MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes. - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). - -# CloudFront as Middleware with S3 backend - -## Use Case - -Adding CloudFront as a middleware for your S3 backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). - -## Configuring CloudFront for Distribution - -If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with CloudFront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html). - -Defaults can be kept in most areas except: - -### Origin: - -The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank. - -### Behaviors: - - - Viewer Protocol Policy: HTTPS Only - - Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE - - Cached HTTP Methods: OPTIONS (checked) - - Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes - - Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts) - -## Registry configuration - -Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3. - -The following example shows what you will need at minimum: -``` -...
-storage: - s3: - region: us-east-1 - bucket: docker.myregistry.com -middleware: - storage: - - name: cloudfront - options: - baseurl: https://abcdefghijklmn.cloudfront.net/ - privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem - keypairid: ABCEDFGHIJKLMNOPQRST -... -``` - -## CloudFront Key-Pair - -A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md deleted file mode 100644 index cab0bbd2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md +++ /dev/null @@ -1,375 +0,0 @@ - - - -# OpenStack Swift storage driver - -An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. - -## Parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Parameter - Required - Description
- authurl - - yes - - URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth -
- username - - yes - - Your OpenStack user name. -
- password - - yes - - Your OpenStack password. -
- region - - no - - The OpenStack region in which your container exists. -
- container - - yes - - The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. -
- tenant - - no - - Your OpenStack tenant name. You can either use tenant or tenantid. -
- tenantid - - no - - Your OpenStack tenant id. You can either use tenant or tenantid. -
- domain - - no - - Your OpenStack domain name for Identity v3 API. You can either use domain or domainid. -
- domainid - - no - - Your OpenStack domain id for Identity v3 API. You can either use domain or domainid. -
- trustid - - no - - Your OpenStack trust id for Identity v3 API. -
- insecureskipverify - - no - - true to skip TLS verification, false by default. -
- chunksize - - no - - Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). -
- prefix - - no - - This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root. -
- secretkey - - no - - The secret key used to generate temporary URLs. -
- accesskey - - no - - The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. -
- -The features supported by the Swift server are queried by requesting the `/info` URL on the server. In case the administrator disabled that feature, the configuration file can specify the following optional parameters: -
- tempurlcontainerkey - - Specify whether to use the container secret key to generate temporary URLs when set to true, or the account secret key otherwise. -
- tempurlmethods - - Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example: - - tempurlmethods: - - GET - - PUT - - HEAD - - POST - - DELETE -
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md deleted file mode 100644 index 158ad999..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md +++ /dev/null @@ -1,65 +0,0 @@ - - - -# Docker Registry Storage Driver - -This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. - -## Provided Drivers - -This storage driver package comes bundled with several drivers: - -- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. -- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. -- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket. -- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). -- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. -- [swift](storage-drivers/swift.md): A driver storing objects in [OpenStack Swift](http://docs.openstack.org/developer/swift/). -- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). -- [gcs](storage-drivers/gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. - -## Storage Driver API - -The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. - -Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. - -Storage drivers are intended to be written in Go, providing compile-time -validation of the `storagedriver.StorageDriver` interface. - -## Driver Selection and Configuration - -The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the built-in [database/sql](http://golang.org/pkg/database/sql) package. - -Storage driver factories may be registered by name using the -`factory.Register` method, and then later invoked by calling `factory.Create` -with a driver name and parameters map. If no such storage driver can be found, -`factory.Create` will return an `InvalidStorageDriverError`. - -## Driver Contribution - -### Writing new storage drivers - -To create a valid storage driver, one must implement the -`storagedriver.StorageDriver` interface and make sure to expose this driver -via the factory system. - -#### Registering - -Storage drivers should call `factory.Register` with their driver name in an `init` function, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase.
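To make the registration and creation flow concrete, here is a minimal sketch. It assumes the upstream docker/distribution import paths and uses the bundled `filesystem` driver with the `rootdirectory` parameter documented above:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/factory"

	// Importing a driver package for its side effects runs its init(),
	// which registers the driver with the factory by name.
	_ "github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	// factory.Create looks up the driver registered under "filesystem" and
	// hands it the parameters map; an unknown name yields an
	// InvalidStorageDriverError.
	driver, err := factory.Create("filesystem", map[string]interface{}{
		"rootdirectory": "/var/lib/registry",
	})
	if err != nil {
		fmt.Println("could not create driver:", err)
		return
	}
	fmt.Printf("created storage driver: %T\n", driver)
}
```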
- -## Testing - -Storage driver test suites are provided in -`storagedriver/testsuites/testsuites.go` and may be used for any storage -driver written in Go. Tests can be registered using the `RegisterSuite` -function, which runs the same set of tests for any registered driver. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go deleted file mode 100644 index 73fcc453..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go +++ /dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "errors" - "net/http" - - "github.com/docker/distribution/health" -) - -var ( - updater = health.NewStatusUpdater() -) - -// DownHandler registers a manual_http_status that always returns an Error -func DownHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(errors.New("Manual Check")) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// UpHandler registers a manual_http_status that always returns nil -func UpHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(nil) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// init sets up the two endpoints to bring the service up and down -func init() { - health.Register("manual_http_status", updater) - http.HandleFunc("/debug/health/down", DownHandler) - http.HandleFunc("/debug/health/up", UpHandler) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go deleted file mode 100644 index e3c3b08d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go +++ /dev/null @@ -1,62 +0,0 @@ -package checks - -import ( - "errors" - "net" - "net/http" - "os" - "strconv" - "time" - - "github.com/docker/distribution/health" -) - -// FileChecker checks the existence of a file and returns an error -// if the file exists. -func FileChecker(f string) health.Checker { - return health.CheckFunc(func() error { - if _, err := os.Stat(f); err == nil { - return errors.New("file exists") - } - return nil - }) -} - -// HTTPChecker does a HEAD request and verifies that the HTTP status code -// returned matches statusCode. -func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { - return health.CheckFunc(func() error { - client := http.Client{ - Timeout: timeout, - } - req, err := http.NewRequest("HEAD", r, nil) - if err != nil { - return errors.New("error creating request: " + r) - } - for headerName, headerValues := range headers { - for _, headerValue := range headerValues { - req.Header.Add(headerName, headerValue) - } - } - response, err := client.Do(req) - if err != nil { - return errors.New("error while checking: " + r) - } - // Close the response body so the underlying connection can be reused. - defer response.Body.Close() - if response.StatusCode != statusCode { - return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) - } - return nil - }) -} - -// TCPChecker attempts to open a TCP connection.
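// For example, one might combine it with a periodic check so the potentially
// slow dial happens in the background (illustrative; "redis:6379" is a
// placeholder address):
//
//	health.Register("redis", health.PeriodicChecker(checks.TCPChecker("redis:6379", 5*time.Second), 10*time.Second))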
-func TCPChecker(addr string, timeout time.Duration) health.Checker { - return health.CheckFunc(func() error { - conn, err := net.DialTimeout("tcp", addr, timeout) - if err != nil { - return errors.New("connection to " + addr + " failed") - } - conn.Close() - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go deleted file mode 100644 index 194b8a56..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go +++ /dev/null @@ -1,130 +0,0 @@ -// Package health provides a generic health checking framework. -// The health package works expvar style. By importing the package the debug -// server gets a "/debug/health" endpoint that returns the current -// status of the application. -// If there are no errors, "/debug/health" will return an HTTP 200 status, -// together with an empty JSON reply "{}". If there are any checks -// with errors, the JSON reply will include all the failed checks, and the -// response will have an HTTP 503 status. -// -// A Check can either be run synchronously, or asynchronously. We recommend -// that most checks are registered as asynchronous checks, so a call to the -// "/debug/health" endpoint always returns immediately. This pattern is -// particularly useful for checks that verify upstream connectivity or -// database status, since they might take a long time to return/timeout. -// -// Installing -// -// To install health, just import it in your application: -// -// import "github.com/docker/distribution/health" -// -// You can also (optionally) import "health/api" that will add two convenience -// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add -// "manual" checks that allow the service to quickly be brought in/out of -// rotation. -// -// import _ "github.com/docker/distribution/health/api" -// -// # curl localhost:5001/debug/health -// {} -// # curl -X POST localhost:5001/debug/health/down -// # curl localhost:5001/debug/health -// {"manual_http_status":"Manual Check"} -// -// After importing these packages to your main application, you can start -// registering checks. -// -// Registering Checks -// -// The recommended way of registering checks is using a periodic Check. -// PeriodicChecks run on a certain schedule and asynchronously update the -// status of the check. This allows CheckStatus to return without blocking -// on an expensive check. -// -// A trivial example of a check that runs every 5 seconds and shuts down our -// server if the current minute is even, could be added as follows: -// -// func currentMinuteEvenCheck() error { -// m := time.Now().Minute() -// if m%2 == 0 { -// return errors.New("Current minute is even!") -// } -// return nil -// } -// -// health.RegisterPeriodicFunc("minute_even", time.Second*5, currentMinuteEvenCheck) -// -// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to -// implement the exact same check, but add a threshold of failures after which -// the check will be unhealthy. This is particularly useful for flaky Checks, -// ensuring some stability of the service when handling them. -// -// health.RegisterPeriodicThresholdFunc("minute_even", time.Second*5, 4, currentMinuteEvenCheck) -// -// The lowest-level way to interact with the health package is calling -// "Register" directly. Register allows you to pass in an arbitrary string and -// something that implements "Checker" and runs your check.
If your method -// returns nil, it is considered a healthy check; otherwise it -// will make the health check endpoint "/debug/health" start returning a 503 -// and list the specific check that failed. -// -// Assuming you wish to register a method called "currentMinuteEvenCheck() -// error" you could do that by doing: -// -// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) -// -// CheckFunc is a convenience type that implements Checker. -// -// Another way of registering a check could be by using an anonymous function -// and the convenience method RegisterFunc. An example that makes the status -// endpoint always return an error: -// -// health.RegisterFunc("my_check", func() error { -// return errors.New("This is an error!") -// }) -// -// Examples -// -// You could also use the health checker mechanism to ensure your application -// only comes up if certain conditions are met, or to allow the developer to -// take the service out of rotation immediately. An example that checks -// database connectivity and immediately takes the server out of rotation on -// err: -// -// updater := health.NewStatusUpdater() -// health.RegisterFunc("database_check", func() error { -// return updater.Check() -// }) -// -// conn, err := Connect(...) // database call here -// if err != nil { -// updater.Update(errors.New("Error connecting to the database: " + err.Error())) -// } -// -// You can also use the predefined Checkers that come included with the health -// package. First, import the checks: -// -// import "github.com/docker/distribution/health/checks" -// -// After that you can make use of any of the provided checks. An example of -// using a `FileChecker` to take the application out of rotation if a certain -// file exists can be done as follows: -// -// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) -// -// After registering the check, it is trivial to take an application out of -// rotation from the console: -// -// # curl localhost:5001/debug/health -// {} -// # touch /tmp/disable -// # curl localhost:5001/debug/health -// {"fileChecker":"file exists"} -// -// You could also test the connectivity to a downstream service by using a -// "HTTPChecker", but ensure that you only mark the test unhealthy if there -// are a minimum of two failures in a row: -// -// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt", 200, 5*time.Second, nil), time.Second*5, 2)) -package health diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go deleted file mode 100644 index 220282dc..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go +++ /dev/null @@ -1,306 +0,0 @@ -package health - -import ( - "encoding/json" - "fmt" - "net/http" - "sync" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/errcode" -) - -// A Registry is a collection of checks. Most applications will use the global -// registry defined in DefaultRegistry. However, unit tests may need to create -// separate registries to isolate themselves from other tests. -type Registry struct { - mu sync.RWMutex - registeredChecks map[string]Checker -} - -// NewRegistry creates a new registry. This isn't necessary for normal use of -// the package, but may be useful for unit tests so individual tests have their -// own set of checks.
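// For example (illustrative), a test can build an isolated registry and
// assert on its status map:
//
//	reg := NewRegistry()
//	reg.RegisterFunc("always-ok", func() error { return nil })
//	if len(reg.CheckStatus()) != 0 {
//		t.Fatal("expected no failing checks")
//	}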
-func NewRegistry() *Registry { - return &Registry{ - registeredChecks: make(map[string]Checker), - } -} - -// DefaultRegistry is the default registry where checks are registered. It is -// the registry used by the HTTP handler. -var DefaultRegistry *Registry - -// Checker is the interface for a Health Checker -type Checker interface { - // Check returns nil if the service is okay. - Check() error -} - -// CheckFunc is a convenience type to create functions that implement -// the Checker interface -type CheckFunc func() error - -// Check implements the Checker interface to allow for any func() error method -// to be passed as a Checker -func (cf CheckFunc) Check() error { - return cf() -} - -// Updater implements a health check that is explicitly set. -type Updater interface { - Checker - - // Update updates the current status of the health check. - Update(status error) -} - -// updater implements Checker and Updater, providing an asynchronous Update -// method. -// This allows us to have a Checker that returns the Check() call immediately, -// not blocking on a potentially expensive check. -type updater struct { - mu sync.Mutex - status error -} - -// Check implements the Checker interface -func (u *updater) Check() error { - u.mu.Lock() - defer u.mu.Unlock() - - return u.status -} - -// Update implements the Updater interface, allowing asynchronous access to -// the status of a Checker. -func (u *updater) Update(status error) { - u.mu.Lock() - defer u.mu.Unlock() - - u.status = status -} - -// NewStatusUpdater returns a new updater -func NewStatusUpdater() Updater { - return &updater{} -} - -// thresholdUpdater implements Checker and Updater, providing an asynchronous Update -// method. -// This allows us to have a Checker that returns the Check() call immediately, -// not blocking on a potentially expensive check. -type thresholdUpdater struct { - mu sync.Mutex - status error - threshold int - count int -} - -// Check implements the Checker interface -func (tu *thresholdUpdater) Check() error { - tu.mu.Lock() - defer tu.mu.Unlock() - - if tu.count >= tu.threshold { - return tu.status - } - - return nil -} - -// thresholdUpdater implements the Updater interface, allowing asynchronous -// access to the status of a Checker.
-func (tu *thresholdUpdater) Update(status error) { - tu.mu.Lock() - defer tu.mu.Unlock() - - if status == nil { - tu.count = 0 - } else if tu.count < tu.threshold { - tu.count++ - } - - tu.status = status -} - -// NewThresholdStatusUpdater returns a new thresholdUpdater -func NewThresholdStatusUpdater(t int) Updater { - return &thresholdUpdater{threshold: t} -} - -// PeriodicChecker wraps an updater to provide a periodic checker -func PeriodicChecker(check Checker, period time.Duration) Checker { - u := NewStatusUpdater() - go func() { - t := time.NewTicker(period) - for { - <-t.C - u.Update(check.Check()) - } - }() - - return u -} - -// PeriodicThresholdChecker wraps an updater to provide a periodic checker that -// uses a threshold before it changes status -func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { - tu := NewThresholdStatusUpdater(threshold) - go func() { - t := time.NewTicker(period) - for { - <-t.C - tu.Update(check.Check()) - } - }() - - return tu -} - -// CheckStatus returns a map with all the current health check errors -func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type - registry.mu.RLock() - defer registry.mu.RUnlock() - statusKeys := make(map[string]string) - for k, v := range registry.registeredChecks { - err := v.Check() - if err != nil { - statusKeys[k] = err.Error() - } - } - - return statusKeys -} - -// CheckStatus returns a map with all the current health check errors from the -// default registry. -func CheckStatus() map[string]string { - return DefaultRegistry.CheckStatus() -} - -// Register associates the checker with the provided name. -func (registry *Registry) Register(name string, check Checker) { - if registry == nil { - registry = DefaultRegistry - } - registry.mu.Lock() - defer registry.mu.Unlock() - _, ok := registry.registeredChecks[name] - if ok { - panic("Check already exists: " + name) - } - registry.registeredChecks[name] = check -} - -// Register associates the checker with the provided name in the default -// registry. -func Register(name string, check Checker) { - DefaultRegistry.Register(name, check) -} - -// RegisterFunc allows the convenience of registering a checker directly from -// an arbitrary func() error. -func (registry *Registry) RegisterFunc(name string, check func() error) { - registry.Register(name, CheckFunc(check)) -} - -// RegisterFunc allows the convenience of registering a checker in the default -// registry directly from an arbitrary func() error. -func RegisterFunc(name string, check func() error) { - DefaultRegistry.RegisterFunc(name, check) -} - -// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker -// from an arbitrary func() error. -func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { - registry.Register(name, PeriodicChecker(CheckFunc(check), period)) -} - -// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker -// in the default registry from an arbitrary func() error. -func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { - DefaultRegistry.RegisterPeriodicFunc(name, period, check) -} - -// RegisterPeriodicThresholdFunc allows the convenience of registering a -// PeriodicChecker from an arbitrary func() error. 
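// For example (illustrative; pingUpstream is a placeholder), tolerate up to
// three consecutive failures of a flaky dependency before reporting unhealthy:
//
//	registry.RegisterPeriodicThresholdFunc("upstream", 10*time.Second, 3, func() error {
//		return pingUpstream()
//	})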
-func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { - registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) -} - -// RegisterPeriodicThresholdFunc allows the convenience of registering a -// PeriodicChecker in the default registry from an arbitrary func() error. -func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { - DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) -} - -// StatusHandler returns a JSON blob with all the currently registered Health Checks -// and their corresponding status. -// Returns 503 if any Error status exists, 200 otherwise -func StatusHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" { - checks := CheckStatus() - status := http.StatusOK - - // If there is an error, return 503 - if len(checks) != 0 { - status = http.StatusServiceUnavailable - } - - statusResponse(w, r, status, checks) - } else { - http.NotFound(w, r) - } -} - -// Handler returns a handler that will return 503 response code if the health -// checks have failed. If everything is okay with the health checks, the -// handler will pass through to the provided handler. Use this handler to -// disable a web application when the health checks fail. -func Handler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - checks := CheckStatus() - if len(checks) != 0 { - errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. - WithDetail("health check failed: please see /debug/health")) - return - } - - handler.ServeHTTP(w, r) // pass through - }) -} - -// statusResponse completes the request with a response describing the health -// of the service. 
-func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { - p, err := json.Marshal(checks) - if err != nil { - context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) - p, err = json.Marshal(struct { - ServerError string `json:"server_error"` - }{ - ServerError: "Could not parse error message", - }) - status = http.StatusInternalServerError - - if err != nil { - context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) - return - } - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(p))) - w.WriteHeader(status) - if _, err := w.Write(p); err != nil { - context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) - } -} - -// Registers global /debug/health api endpoint, creates default registry -func init() { - DefaultRegistry = NewRegistry() - http.HandleFunc("/debug/health", StatusHandler) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go deleted file mode 100644 index 2133b3be..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go +++ /dev/null @@ -1,166 +0,0 @@ -package notifications - -import ( - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/uuid" -) - -type bridge struct { - ub URLBuilder - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink -} - -var _ Listener = &bridge{} - -// URLBuilder defines a subset of url builder to be used by the event listener. -type URLBuilder interface { - BuildManifestURL(name, tag string) (string, error) - BuildBlobURL(name string, dgst digest.Digest) (string, error) -} - -// NewBridge returns a notification listener that writes records to sink, -// using the actor and source. Any urls populated in the events created by -// this bridge will be created using the URLBuilder. -// TODO(stevvooe): Update this to simply take a context.Context object. -func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { - return &bridge{ - ub: ub, - actor: actor, - source: source, - request: request, - sink: sink, - } -} - -// NewRequestRecord builds a RequestRecord for use in NewBridge from an -// http.Request, associating it with a request id. 
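// A typical wiring inside an HTTP handler might look like the following
// (illustrative; ub, source, actor and sink are provided by the application):
//
//	rr := NewRequestRecord(uuid.Generate().String(), r)
//	listener := NewBridge(ub, source, actor, rr, sink)
//	repo = Listen(repo, listener)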
-func NewRequestRecord(id string, r *http.Request) RequestRecord { - return RequestRecord{ - ID: id, - Addr: context.RemoteAddr(r), - Host: r.Host, - Method: r.Method, - UserAgent: r.UserAgent(), - } -} - -func (b *bridge) ManifestPushed(repo string, sm distribution.Manifest) error { - return b.createManifestEventAndWrite(EventActionPush, repo, sm) -} - -func (b *bridge) ManifestPulled(repo string, sm distribution.Manifest) error { - return b.createManifestEventAndWrite(EventActionPull, repo, sm) -} - -func (b *bridge) ManifestDeleted(repo string, sm distribution.Manifest) error { - return b.createManifestEventAndWrite(EventActionDelete, repo, sm) -} - -func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionPush, repo, desc) -} - -func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionPull, repo, desc) -} - -func (b *bridge) BlobMounted(repo string, desc distribution.Descriptor, fromRepo string) error { - event, err := b.createBlobEvent(EventActionMount, repo, desc) - if err != nil { - return err - } - event.Target.FromRepository = fromRepo - return b.sink.Write(*event) -} - -func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionDelete, repo, desc) -} - -func (b *bridge) createManifestEventAndWrite(action string, repo string, sm distribution.Manifest) error { - manifestEvent, err := b.createManifestEvent(action, repo, sm) - if err != nil { - return err - } - - return b.sink.Write(*manifestEvent) -} - -func (b *bridge) createManifestEvent(action string, repo string, sm distribution.Manifest) (*Event, error) { - event := b.createEvent(action) - event.Target.Repository = repo - - mt, p, err := sm.Payload() - if err != nil { - return nil, err - } - - // Ensure we have the canonical manifest descriptor here - _, desc, err := distribution.UnmarshalManifest(mt, p) - if err != nil { - return nil, err - } - - event.Target.MediaType = mt - event.Target.Length = desc.Size - event.Target.Size = desc.Size - event.Target.Digest = desc.Digest - - event.Target.URL, err = b.ub.BuildManifestURL(repo, event.Target.Digest.String()) - if err != nil { - return nil, err - } - - return event, nil -} - -func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error { - event, err := b.createBlobEvent(action, repo, desc) - if err != nil { - return err - } - - return b.sink.Write(*event) -} - -func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) { - event := b.createEvent(action) - event.Target.Descriptor = desc - event.Target.Length = desc.Size - event.Target.Repository = repo - - var err error - event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest) - if err != nil { - return nil, err - } - - return event, nil -} - -// createEvent creates an event with actor and source populated. -func (b *bridge) createEvent(action string) *Event { - event := createEvent(action) - event.Source = b.source - event.Actor = b.actor - event.Request = b.request - - return event -} - -// createEvent returns a new event, timestamped, with the specified action. 
-func createEvent(action string) *Event { - return &Event{ - ID: uuid.Generate().String(), - Timestamp: time.Now(), - Action: action, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go deleted file mode 100644 index dfdb111c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go +++ /dev/null @@ -1,86 +0,0 @@ -package notifications - -import ( - "net/http" - "time" -) - -// EndpointConfig covers the optional configuration parameters for an active -// endpoint. -type EndpointConfig struct { - Headers http.Header - Timeout time.Duration - Threshold int - Backoff time.Duration -} - -// defaults sets any zero-valued fields to a reasonable default. -func (ec *EndpointConfig) defaults() { - if ec.Timeout <= 0 { - ec.Timeout = time.Second - } - - if ec.Threshold <= 0 { - ec.Threshold = 10 - } - - if ec.Backoff <= 0 { - ec.Backoff = time.Second - } -} - -// Endpoint is a reliable, queued, thread-safe sink that notifies external HTTP -// services when events are written. Writes are non-blocking and always -// succeed for callers but events may be queued internally. -type Endpoint struct { - Sink - url string - name string - - EndpointConfig - - metrics *safeMetrics -} - -// NewEndpoint returns a running endpoint, ready to receive events. -func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { - var endpoint Endpoint - endpoint.name = name - endpoint.url = url - endpoint.EndpointConfig = config - endpoint.defaults() - endpoint.metrics = newSafeMetrics() - - // Configures the inmemory queue, retry, and http pipeline. - endpoint.Sink = newHTTPSink( - endpoint.url, endpoint.Timeout, endpoint.Headers, - endpoint.metrics.httpStatusListener()) - endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) - endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) - - register(&endpoint) - return &endpoint -} - -// Name returns the name of the endpoint, generally used for debugging. -func (e *Endpoint) Name() string { - return e.name -} - -// URL returns the url of the endpoint. -func (e *Endpoint) URL() string { - return e.url -} - -// ReadMetrics populates em with metrics from the endpoint. -func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { - e.metrics.Lock() - defer e.metrics.Unlock() - - *em = e.metrics.EndpointMetrics - // The map still needs to be copied in a threadsafe manner. - em.Statuses = make(map[string]int) - for k, v := range e.metrics.Statuses { - em.Statuses[k] = v - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go deleted file mode 100644 index 19d6a776..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go +++ /dev/null @@ -1,157 +0,0 @@ -package notifications - -import ( - "fmt" - "time" - - "github.com/docker/distribution" -) - -// EventAction constants used in action field of Event. -const ( - EventActionPull = "pull" - EventActionPush = "push" - EventActionMount = "mount" - EventActionDelete = "delete" -) - -const ( - // EventsMediaType is the mediatype for the json event envelope. If the - // Event, ActorRecord, SourceRecord or Envelope structs change, the version - // number should be incremented.
- EventsMediaType = "application/vnd.docker.distribution.events.v1+json" - // layerMediaType is the media type for image rootfs diffs (aka "layers") - // used by Docker. We don't expect this to change for quite a while. - layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" -) - -// Envelope defines the fields of a json event envelope message that can hold -// one or more events. -type Envelope struct { - // Events make up the contents of the envelope. Events present in a single - // envelope are not necessarily related. - Events []Event `json:"events,omitempty"` -} - -// TODO(stevvooe): The event type should be separate from the json format. It -// should be defined as an interface. Leaving as is for now since we don't -// need that at this time. If we make this change, the struct below would be -// called "EventRecord". - -// Event provides the fields required to describe a registry event. -type Event struct { - // ID provides a unique identifier for the event. - ID string `json:"id,omitempty"` - - // Timestamp is the time at which the event occurred. - Timestamp time.Time `json:"timestamp,omitempty"` - - // Action indicates what action encompasses the provided event. - Action string `json:"action,omitempty"` - - // Target uniquely describes the target of the event. - Target struct { - // TODO(stevvooe): Use http.DetectContentType for layers, maybe. - - distribution.Descriptor - - // Length in bytes of content. Same as Size field in Descriptor. - // Provided for backwards compatibility. - Length int64 `json:"length,omitempty"` - - // Repository identifies the named repository. - Repository string `json:"repository,omitempty"` - - // FromRepository identifies the named repository which a blob was mounted - // from if appropriate. - FromRepository string `json:"fromRepository,omitempty"` - - // URL provides a direct link to the content. - URL string `json:"url,omitempty"` - } `json:"target,omitempty"` - - // Request covers the request that generated the event. - Request RequestRecord `json:"request,omitempty"` - - // Actor specifies the agent that initiated the event. For most - // situations, this could be from the authorization context of the request. - Actor ActorRecord `json:"actor,omitempty"` - - // Source identifies the registry node that generated the event. Put - // differently, while the actor "initiates" the event, the source - // "generates" it. - Source SourceRecord `json:"source,omitempty"` -} - -// ActorRecord specifies the agent that initiated the event. For most -// situations, this could be from the authorization context of the request. -// Data in this record can refer to both the initiating client and the -// generating request. -type ActorRecord struct { - // Name corresponds to the subject or username associated with the - // request context that generated the event. - Name string `json:"name,omitempty"` - - // TODO(stevvooe): Look into setting a session cookie to get this - // without docker daemon. - // SessionID - - // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and - // get the actual command. - // Command -} - -// RequestRecord covers the request that generated the event. -type RequestRecord struct { - // ID uniquely identifies the request that initiated the event. - ID string `json:"id"` - - // Addr contains the ip or hostname and possibly port of the client - // connection that initiated the event. This is the RemoteAddr from - // the standard http request.
- Addr string `json:"addr,omitempty"` - - // Host is the externally accessible host name of the registry instance, - // as specified by the http host header on incoming requests. - Host string `json:"host,omitempty"` - - // Method has the request method that generated the event. - Method string `json:"method"` - - // UserAgent contains the user agent header of the request. - UserAgent string `json:"useragent"` -} - -// SourceRecord identifies the registry node that generated the event. Put -// differently, while the actor "initiates" the event, the source "generates" -// it. -type SourceRecord struct { - // Addr contains the ip or hostname and the port of the registry node - // that generated the event. Generally, this will be resolved by - // os.Hostname() along with the running port. - Addr string `json:"addr,omitempty"` - - // InstanceID identifies a running instance of an application. Changes - // after each restart. - InstanceID string `json:"instanceID,omitempty"` -} - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. - ErrSinkClosed = fmt.Errorf("sink: closed") -) - -// Sink accepts and sends events. -type Sink interface { - // Write writes one or more events to the sink. If no error is returned, - // the caller will assume that all events have been committed and will not - // try to send them again. If an error is received, the caller may retry - // sending the event. The caller should cede the slice of memory to the - // sink and not modify it after calling this method. - Write(events ...Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go deleted file mode 100644 index 465434f1..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go +++ /dev/null @@ -1,147 +0,0 @@ -package notifications - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" -) - -// httpSink implements a single-flight, http notification endpoint. This is -// very lightweight in that it only makes an attempt at an http request. -// Reliability should be provided by the caller. -type httpSink struct { - url string - - mu sync.Mutex - closed bool - client *http.Client - listeners []httpStatusListener - - // TODO(stevvooe): Allow one to configure the media type accepted by this - // sink and choose the serialization based on that. -} - -// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other -// sinks for increased reliability. -func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { - return &httpSink{ - url: u, - listeners: listeners, - client: &http.Client{ - Transport: &headerRoundTripper{ - Transport: http.DefaultTransport.(*http.Transport), - headers: headers, - }, - Timeout: timeout, - }, - } -} - -// httpStatusListener is called on various outcomes of sending notifications. -type httpStatusListener interface { - success(status int, events ...Event) - failure(status int, events ...Event) - err(err error, events ...Event) -} - -// Write makes an attempt to notify the endpoint, returning an error if it -// fails. It is the caller's responsibility to retry on error. The events are -// accepted or rejected as a group.
-func (hs *httpSink) Write(events ...Event) error { - hs.mu.Lock() - defer hs.mu.Unlock() - defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() - - if hs.closed { - return ErrSinkClosed - } - - envelope := Envelope{ - Events: events, - } - - // TODO(stevvooe): It is not ideal to keep re-encoding the request body on - // retry but we are going to do it to keep the code simple. It is likely - // we could change the event struct to manage its own buffer. - - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) - } - - body := bytes.NewReader(p) - resp, err := hs.client.Post(hs.url, EventsMediaType, body) - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - - return fmt.Errorf("%v: error posting: %v", hs, err) - } - defer resp.Body.Close() - - // The notifier will treat any 2xx or 3xx response as accepted by the - // endpoint. - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - for _, listener := range hs.listeners { - listener.success(resp.StatusCode, events...) - } - - // TODO(stevvooe): This is a little accepting: we may want to support - // unsupported media type responses with retries using the correct - // media type. There may also be cases that will never work. - - return nil - default: - for _, listener := range hs.listeners { - listener.failure(resp.StatusCode, events...) - } - return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) - } -} - -// Close the endpoint -func (hs *httpSink) Close() error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return fmt.Errorf("httpsink: already closed") - } - - hs.closed = true - return nil -} - -func (hs *httpSink) String() string { - return fmt.Sprintf("httpSink{%s}", hs.url) -} - -type headerRoundTripper struct { - *http.Transport // must be transport to support CancelRequest - headers http.Header -} - -func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var nreq http.Request - nreq = *req - nreq.Header = make(http.Header) - - merge := func(headers http.Header) { - for k, v := range headers { - nreq.Header[k] = append(nreq.Header[k], v...) - } - } - - merge(req.Header) - merge(hrt.headers) - - return hrt.Transport.RoundTrip(&nreq) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go deleted file mode 100644 index 21857edf..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go +++ /dev/null @@ -1,201 +0,0 @@ -package notifications - -import ( - "net/http" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -// ManifestListener describes a set of methods for listening to events related to manifests. -type ManifestListener interface { - ManifestPushed(repo string, sm distribution.Manifest) error - ManifestPulled(repo string, sm distribution.Manifest) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - ManifestDeleted(repo string, sm distribution.Manifest) error -} - -// BlobListener describes a listener that can respond to layer related events. 
-type BlobListener interface { - BlobPushed(repo string, desc distribution.Descriptor) error - BlobPulled(repo string, desc distribution.Descriptor) error - BlobMounted(repo string, desc distribution.Descriptor, fromRepo string) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - BlobDeleted(repo string, desc distribution.Descriptor) error -} - -// Listener combines all repository events into a single interface. -type Listener interface { - ManifestListener - BlobListener -} - -type repositoryListener struct { - distribution.Repository - listener Listener -} - -// Listen dispatches events on the repository to the listener. -func Listen(repo distribution.Repository, listener Listener) distribution.Repository { - return &repositoryListener{ - Repository: repo, - listener: listener, - } -} - -func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - manifests, err := rl.Repository.Manifests(ctx, options...) - if err != nil { - return nil, err - } - return &manifestServiceListener{ - ManifestService: manifests, - parent: rl, - }, nil -} - -func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { - return &blobServiceListener{ - BlobStore: rl.Repository.Blobs(ctx), - parent: rl, - } -} - -type manifestServiceListener struct { - distribution.ManifestService - parent *repositoryListener -} - -func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - sm, err := msl.ManifestService.Get(ctx, dgst) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - dgst, err := msl.ManifestService.Put(ctx, sm, options...) 
- - if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest push to listener: %v", err) - } - } - - return dgst, err -} - -type blobServiceListener struct { - distribution.BlobStore - parent *repositoryListener -} - -var _ distribution.BlobStore = &blobServiceListener{} - -func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - p, err := bsl.BlobStore.Get(ctx, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return p, err -} - -func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - rc, err := bsl.BlobStore.Open(ctx, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return rc, err -} - -func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return err -} - -func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - desc, err := bsl.BlobStore.Put(ctx, mediaType, p) - if err == nil { - if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - - return desc, err -} - -func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - wr, err := bsl.BlobStore.Create(ctx, options...) 
- switch err := err.(type) { - case distribution.ErrBlobMounted: - if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Name(), err.Descriptor, err.From.Name()); err != nil { - context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) - } - return nil, err - } - return bsl.decorateWriter(wr), err -} - -func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - wr, err := bsl.BlobStore.Resume(ctx, id) - return bsl.decorateWriter(wr), err -} - -func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { - return &blobWriterListener{ - BlobWriter: wr, - parent: bsl, - } -} - -type blobWriterListener struct { - distribution.BlobWriter - parent *blobServiceListener -} - -func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - committed, err := bwl.BlobWriter.Commit(ctx, desc) - if err == nil { - if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { - context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) - } - } - - return committed, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go deleted file mode 100644 index 2a8ffcbd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go +++ /dev/null @@ -1,152 +0,0 @@ -package notifications - -import ( - "expvar" - "fmt" - "net/http" - "sync" -) - -// EndpointMetrics track various actions taken by the endpoint, typically by -// number of events. The goal of this to export it via expvar but we may find -// some other future solution to be better. -type EndpointMetrics struct { - Pending int // events pending in queue - Events int // total events incoming - Successes int // total events written successfully - Failures int // total events failed - Errors int // total events errored - Statuses map[string]int // status code histogram, per call event -} - -// safeMetrics guards the metrics implementation with a lock and provides a -// safe update function. -type safeMetrics struct { - EndpointMetrics - sync.Mutex // protects statuses map -} - -// newSafeMetrics returns safeMetrics with map allocated. -func newSafeMetrics() *safeMetrics { - var sm safeMetrics - sm.Statuses = make(map[string]int) - return &sm -} - -// httpStatusListener returns the listener for the http sink that updates the -// relevent counters. -func (sm *safeMetrics) httpStatusListener() httpStatusListener { - return &endpointMetricsHTTPStatusListener{ - safeMetrics: sm, - } -} - -// eventQueueListener returns a listener that maintains queue related counters. -func (sm *safeMetrics) eventQueueListener() eventQueueListener { - return &endpointMetricsEventQueueListener{ - safeMetrics: sm, - } -} - -// endpointMetricsHTTPStatusListener increments counters related to http sinks -// for the relevent events. 
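All of the listener types deleted above follow one decorator pattern: embed the wrapped interface, intercept a method, and notify only on success while passing errors through. A condensed, hypothetical sketch of that pattern outside the distribution types:

package main

import "fmt"

// Store stands in for the decorated interfaces above
// (distribution.BlobStore, distribution.ManifestService).
type Store interface {
	Put(key string, value []byte) error
}

type memStore map[string][]byte

func (m memStore) Put(key string, value []byte) error {
	m[key] = value
	return nil
}

// listenedStore embeds the wrapped Store and notifies a callback only on
// success, exactly the shape of blobServiceListener and friends.
type listenedStore struct {
	Store
	onPut func(key string)
}

func (ls listenedStore) Put(key string, value []byte) error {
	err := ls.Store.Put(key, value)
	if err == nil {
		ls.onPut(key) // dispatch on success; errors pass through untouched
	}
	return err
}

func main() {
	s := listenedStore{
		Store: memStore{},
		onPut: func(k string) { fmt.Println("pushed:", k) },
	}
	s.Put("layer-sha256", []byte("data"))
}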
-type endpointMetricsHTTPStatusListener struct { - *safeMetrics -} - -var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} - -func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Successes += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Failures += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Errors += len(events) -} - -// endpointMetricsEventQueueListener maintains the incoming events counter and -// the queues pending count. -type endpointMetricsEventQueueListener struct { - *safeMetrics -} - -func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Events += len(events) - eqc.Pending += len(events) -} - -func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Pending -= len(events) -} - -// endpoints is global registry of endpoints used to report metrics to expvar -var endpoints struct { - registered []*Endpoint - mu sync.Mutex -} - -// register places the endpoint into expvar so that stats are tracked. -func register(e *Endpoint) { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - endpoints.registered = append(endpoints.registered, e) -} - -func init() { - // NOTE(stevvooe): Setup registry metrics structure to report to expvar. - // Ideally, we do more metrics through logging but we need some nice - // realtime metrics for queue state for now. - - registry := expvar.Get("registry") - - if registry == nil { - registry = expvar.NewMap("registry") - } - - var notifications expvar.Map - notifications.Init() - notifications.Set("endpoints", expvar.Func(func() interface{} { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - var names []interface{} - for _, v := range endpoints.registered { - var epjson struct { - Name string `json:"name"` - URL string `json:"url"` - EndpointConfig - - Metrics EndpointMetrics - } - - epjson.Name = v.Name() - epjson.URL = v.URL() - epjson.EndpointConfig = v.EndpointConfig - - v.ReadMetrics(&epjson.Metrics) - - names = append(names, epjson) - } - - return names - })) - - registry.(*expvar.Map).Set("notifications", ¬ifications) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go deleted file mode 100644 index dda4a565..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go +++ /dev/null @@ -1,337 +0,0 @@ -package notifications - -import ( - "container/list" - "fmt" - "sync" - "time" - - "github.com/Sirupsen/logrus" -) - -// NOTE(stevvooe): This file contains definitions for several utility sinks. -// Typically, the broadcaster is the only sink that should be required -// externally, but others are suitable for export if the need arises. Albeit, -// the tight integration with endpoint metrics should be removed. - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. 
Reliability can be -// provided by wrapping incoming sinks. -type Broadcaster struct { - sinks []Sink - events chan []Event - closed chan chan struct{} -} - -// NewBroadcaster creates a Broadcaster over the given sinks. The broadcaster -// behavior will be affected by the properties of each sink. Generally, a -// sink should accept all messages and deal with reliability on its own; -// EventQueue and RetryingSink are useful wrappers here. -func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan []Event), - closed: make(chan chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts a block of events to be dispatched to all sinks. This method -// will never fail and should never block (hopefully!). The caller cedes the -// slice memory to the broadcaster and should not modify it after calling -// write. -func (b *Broadcaster) Write(events ...Event) error { - select { - case b.events <- events: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - logrus.Infof("broadcaster: closing") - select { - case <-b.closed: - // already closed - return fmt.Errorf("broadcaster: already closed") - default: - // do a little chan handoff dance to synchronize closing - closed := make(chan struct{}) - b.closed <- closed - close(b.closed) - <-closed - return nil - } -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. -func (b *Broadcaster) run() { - for { - select { - case block := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(block...); err != nil { - logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) - } - } - case closing := <-b.closed: - - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil { - logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) - } - } - closing <- struct{}{} - - logrus.Debugf("broadcaster: closed") - return - } - } -} - -// eventQueue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type eventQueue struct { - sink Sink - events *list.List - listeners []eventQueueListener - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// eventQueueListener is called when various events happen on the queue. -type eventQueueListener interface { - ingress(events ...Event) - egress(events ...Event) -} - -// newEventQueue returns a queue to the provided sink. Any provided listeners -// will be called to update pending metrics on ingress and egress. -func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { - eq := eventQueue{ - sink: sink, - events: list.New(), - listeners: listeners, - } - - eq.cond = sync.NewCond(&eq.mu) - go eq.run() - return &eq -} - -// Write accepts the events into the queue, only failing if the queue has -// been closed. -func (eq *eventQueue) Write(events ...Event) error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - for _, listener := range eq.listeners { - listener.ingress(events...)
- } - eq.events.PushBack(events) - eq.cond.Signal() // signal waiters - - return nil -} - -// Close shuts down the event queue, flushing all pending events before -// closing the underlying sink. -func (eq *eventQueue) Close() error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return fmt.Errorf("eventqueue: already closed") - } - - // set closed flag - eq.closed = true - eq.cond.Signal() // signal flushes queue - eq.cond.Wait() // wait for signal from last flush - - return eq.sink.Close() -} - -// run is the main goroutine to flush events to the target sink. -func (eq *eventQueue) run() { - for { - block := eq.next() - - if block == nil { - return // nil block means event queue is closed. - } - - if err := eq.sink.Write(block...); err != nil { - logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) - } - - for _, listener := range eq.listeners { - listener.egress(block...) - } - } -} - -// next encompasses the critical section of the run loop. When the queue is -// empty, it will block on the condition. If new data arrives, it will wake -// and return a block. When closed, a nil slice will be returned. -func (eq *eventQueue) next() []Event { - eq.mu.Lock() - defer eq.mu.Unlock() - - for eq.events.Len() < 1 { - if eq.closed { - eq.cond.Broadcast() - return nil - } - - eq.cond.Wait() - } - - front := eq.events.Front() - block := front.Value.([]Event) - eq.events.Remove(front) - - return block -} - -// retryingSink retries the write until success or an ErrSinkClosed is -// returned. The underlying sink must have a nonzero probability (p > 0) of -// succeeding or the sink will block. Internally, a circuit breaker is used -// to manage retries and resets. -// Concurrent calls to a retrying sink are serialized through the sink, -// meaning that if one is in-flight, another will not proceed. -type retryingSink struct { - mu sync.Mutex - sink Sink - closed bool - - // circuit breaker heuristics - failures struct { - threshold int - recent int - last time.Time - backoff time.Duration // time after which we retry after failure. - } -} - -type retryingSinkListener interface { - active(events ...Event) - retry(events ...Event) -} - -// TODO(stevvooe): We are using a circuit breaker here, which actually doesn't -// make a whole lot of sense for this use case, since we always retry. Move -// this to use bounded exponential backoff. - -// newRetryingSink returns a sink that will retry writes to a sink, backing -// off on failure. Parameters threshold and backoff adjust the behavior of the -// circuit breaker. -func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { - rs := &retryingSink{ - sink: sink, - } - rs.failures.threshold = threshold - rs.failures.backoff = backoff - - return rs -} - -// Write attempts to flush the events to the downstream sink until it succeeds -// or the sink is closed. -func (rs *retryingSink) Write(events ...Event) error { - rs.mu.Lock() - defer rs.mu.Unlock() - -retry: - - if rs.closed { - return ErrSinkClosed - } - - if !rs.proceed() { - logrus.Warnf("%v encountered too many errors, backing off", rs.sink) - rs.wait(rs.failures.backoff) - goto retry - } - - if err := rs.write(events...); err != nil { - if err == ErrSinkClosed { - // terminal! - return err - } - - logrus.Errorf("retryingsink: error writing events: %v, retrying", err) - goto retry - } - - return nil -} - -// Close closes the retrying sink and its underlying sink.
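The eventQueue above is a classic unbounded producer/consumer queue built on sync.Cond. The following stripped-down, self-contained version shows the same push/next/close protocol; all names are illustrative, not from the vendored package.

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// queue mirrors eventQueue's critical section: writers push and Signal,
// the single consumer waits on the condition until data arrives or the
// queue is closed.
type queue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  *list.List
	closed bool
}

func newQueue() *queue {
	q := &queue{items: list.New()}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *queue) push(v string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items.PushBack(v)
	q.cond.Signal()
}

// next blocks until an item is available; it returns "", false once the
// queue is closed and drained, like eventQueue.next returning nil.
func (q *queue) next() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for q.items.Len() == 0 {
		if q.closed {
			return "", false
		}
		q.cond.Wait()
	}
	front := q.items.Front()
	q.items.Remove(front)
	return front.Value.(string), true
}

func (q *queue) close() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.closed = true
	q.cond.Broadcast()
}

func main() {
	q := newQueue()
	done := make(chan struct{})
	go func() {
		for v, ok := q.next(); ok; v, ok = q.next() {
			fmt.Println("got:", v)
		}
		close(done)
	}()
	q.push("a")
	q.push("b")
	q.close()
	<-done
}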
-func (rs *retryingSink) Close() error { - rs.mu.Lock() - defer rs.mu.Unlock() - - if rs.closed { - return fmt.Errorf("retryingsink: already closed") - } - - rs.closed = true - return rs.sink.Close() -} - -// write provides a helper that dispatches failure and success properly. Used -// by write as the single-flight write call. -func (rs *retryingSink) write(events ...Event) error { - if err := rs.sink.Write(events...); err != nil { - rs.failure() - return err - } - - rs.reset() - return nil -} - -// wait backoff time against the sink, unlocking so others can proceed. Should -// only be called by methods that currently have the mutex. -func (rs *retryingSink) wait(backoff time.Duration) { - rs.mu.Unlock() - defer rs.mu.Lock() - - // backoff here - time.Sleep(backoff) -} - -// reset marks a successful call. -func (rs *retryingSink) reset() { - rs.failures.recent = 0 - rs.failures.last = time.Time{} -} - -// failure records a failure. -func (rs *retryingSink) failure() { - rs.failures.recent++ - rs.failures.last = time.Now().UTC() -} - -// proceed returns true if the call should proceed based on circuit breaker -// heuristics. -func (rs *retryingSink) proceed() bool { - return rs.failures.recent < rs.failures.threshold || - time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile deleted file mode 100644 index 1e2a8471..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM ubuntu:14.04 - -ENV GOLANG_VERSION 1.4rc1 -ENV GOPATH /var/cache/drone -ENV GOROOT /usr/local/go -ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin - -ENV LANG C -ENV LC_ALL C - -RUN apt-get update && apt-get install -y \ - wget ca-certificates git mercurial bzr \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ - tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ - rm go${GOLANG_VERSION}.linux-amd64.tar.gz - -RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md deleted file mode 100644 index eda88696..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Git Hooks -========= - -To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. - -As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. 
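The TODO above notes that the circuit breaker should eventually give way to bounded exponential backoff. A sketch of what such a replacement might look like; the constants and helper names here are assumptions, not part of the vendored code.

package main

import (
	"errors"
	"fmt"
	"time"
)

// backoff returns the wait before retry n (0-indexed), doubling from
// base and clamped at max, i.e. the bounded exponential backoff the
// TODO above suggests in place of the circuit breaker.
func backoff(n int, base, max time.Duration) time.Duration {
	d := base << uint(n)
	if d <= 0 || d > max { // d <= 0 guards against shift overflow
		return max
	}
	return d
}

// retry invokes write up to attempts times, sleeping between failures.
func retry(write func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = write(); err == nil {
			return nil
		}
		if i < attempts-1 {
			time.Sleep(backoff(i, 100*time.Millisecond, 5*time.Second))
		}
	}
	return err
}

func main() {
	n := 0
	err := retry(func() error {
		n++
		if n < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 5)
	fmt.Println("attempts:", n, "err:", err)
}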
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh deleted file mode 100644 index 6afea8a1..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -cd $(dirname $0) - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -set -e -set -x - -# Just in case the directory doesn't exist -mkdir -p $REPO_ROOT/.git/hooks - -ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit deleted file mode 100644 index 3ee2e913..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -cd $REPO_ROOT - -GOFMT_ERRORS=$(gofmt -s -l . 2>&1) -if [ -n "$GOFMT_ERRORS" ]; then - printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr - exit 1 -fi - -GOLINT_ERRORS=$(golint ./... 2>&1) -if [ -n "$GOLINT_ERRORS" ]; then - printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr - exit 1 -fi - -GOVET_ERRORS=$(go vet ./... 2>&1) -GOVET_STATUS=$? -if [ "$GOVET_STATUS" -ne "0" ]; then - printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr - exit $GOVET_STATUS -fi diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go deleted file mode 100644 index b3bb580d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go +++ /dev/null @@ -1,144 +0,0 @@ -// Package auth defines a standard interface for request access controllers. -// -// An access controller has a simple interface with a single `Authorized` -// method which checks that a given request is authorized to perform one or -// more actions on one or more resources. This method should return a non-nil -// error if the request is not authorized. -// -// An implementation registers its access controller by name with a constructor -// which accepts an options map for configuring the access controller. 
-// -// options := map[string]interface{}{"sillySecret": "whysosilly?"} -// accessController, _ := auth.GetAccessController("silly", options) -// -// This `accessController` can then be used in a request handler like so: -// -// func updateOrder(w http.ResponseWriter, r *http.Request) { -// orderNumber := r.FormValue("orderNumber") -// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} -// access := auth.Access{Resource: resource, Action: "update"} -// -// if ctx, err := accessController.Authorized(ctx, access); err != nil { -// if challenge, ok := err.(auth.Challenge); ok { -// // Let the challenge write the response. -// challenge.SetHeaders(w) -// w.WriteHeader(http.StatusUnauthorized) -// return -// } else { -// // Some other error. -// } -// } -// } -// -package auth - -import ( - "fmt" - "net/http" - - "github.com/docker/distribution/context" -) - -// UserInfo carries information about -// an authenticated/authorized client. -type UserInfo struct { - Name string -} - -// Resource describes a resource by type and name. -type Resource struct { - Type string - Name string -} - -// Access describes a specific action that is -// requested or allowed for a given resource. -type Access struct { - Resource - Action string -} - -// Challenge is a special error type which is used for HTTP 401 Unauthorized -// responses and is able to write the response with WWW-Authenticate challenge -// header values based on the error. -type Challenge interface { - error - - // SetHeaders prepares the request to conduct a challenge response by - // adding an HTTP challenge header on the response message. Callers - // are expected to set the appropriate HTTP status code (e.g. 401) - // themselves. - SetHeaders(w http.ResponseWriter) -} - -// AccessController controls access to registry resources based on a request -// and required access levels for a request. Implementations can support both -// complete denial and http authorization challenges. -type AccessController interface { - // Authorized returns a new authorized context if the request is granted - // access, and a non-nil error otherwise. If one or more Access structs are - // provided, the requested access will be compared with what is available - // to the context. The given context will contain a "http.request" key with - // a `*http.Request` value. If the error is non-nil, access should always - // be denied. The error may be of type Challenge, in which case the caller - // may have the Challenge handle the request or choose what action to take - // based on the Challenge header or response status. The returned context - // object should have an "auth.user" value set to a UserInfo struct. - Authorized(ctx context.Context, access ...Access) (context.Context, error) -} - -// WithUser returns a context with the authorized user info. -func WithUser(ctx context.Context, user UserInfo) context.Context { - return userInfoContext{ - Context: ctx, - user: user, - } -} - -type userInfoContext struct { - context.Context - user UserInfo } - -func (uic userInfoContext) Value(key interface{}) interface{} { - switch key { - case "auth.user": - return uic.user - case "auth.user.name": - return uic.user.Name - } - - return uic.Context.Value(key) -} - -// InitFunc is the type of an AccessController factory function and is used -// to register the constructor for different AccessController backends.
-type InitFunc func(options map[string]interface{}) (AccessController, error) - -var accessControllers map[string]InitFunc - -func init() { - accessControllers = make(map[string]InitFunc) -} - -// Register is used to register an InitFunc for -// an AccessController backend with the given name. -func Register(name string, initFunc InitFunc) error { - if _, exists := accessControllers[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - accessControllers[name] = initFunc - - return nil -} - -// GetAccessController constructs an AccessController -// with the given options using the named backend. -func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { - if initFunc, exists := accessControllers[name]; exists { - return initFunc(options) - } - - return nil, fmt.Errorf("no access controller registered with name: %s", name) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go deleted file mode 100644 index 82d3556d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go +++ /dev/null @@ -1,102 +0,0 @@ -// Package htpasswd provides a simple authentication scheme that checks for the -// user credential hash in an htpasswd formatted file in a configuration-determined -// location. -// -// This authentication method MUST be used under TLS, as simple token-replay attack is possible. -package htpasswd - -import ( - "errors" - "fmt" - "net/http" - "os" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -var ( - // ErrInvalidCredential is returned when the auth token does not authenticate correctly. - ErrInvalidCredential = errors.New("invalid authorization credential") - - // ErrAuthenticationFailure returned when authentication failure to be presented to agent. - ErrAuthenticationFailure = errors.New("authentication failured") -) - -type accessController struct { - realm string - htpasswd *htpasswd -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) - } - - path, present := options["path"] - if _, ok := path.(string); !present || !ok { - return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) - } - - f, err := os.Open(path.(string)) - if err != nil { - return nil, err - } - defer f.Close() - - h, err := newHTPasswd(f) - if err != nil { - return nil, err - } - - return &accessController{realm: realm.(string), htpasswd: h}, nil -} - -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - username, password, ok := req.BasicAuth() - if !ok { - return nil, &challenge{ - realm: ac.realm, - err: ErrInvalidCredential, - } - } - - if err := ac.htpasswd.authenticateUser(username, password); err != nil { - context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) - return nil, &challenge{ - realm: ac.realm, - err: ErrAuthenticationFailure, - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil -} - -// challenge implements the auth.Challenge interface. 
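Register and GetAccessController above implement a conventional init-time plugin registry. Here is a minimal standalone sketch of the same pattern; for brevity the backend constructs a plain string rather than an AccessController, and all names are invented.

package main

import (
	"errors"
	"fmt"
)

// InitFunc and the map below condense the Register/GetAccessController
// pattern: backends self-register by name from an init function, and the
// server resolves one by name at startup.
type InitFunc func(options map[string]interface{}) (string, error)

var backends = map[string]InitFunc{}

func register(name string, f InitFunc) error {
	if _, exists := backends[name]; exists {
		return fmt.Errorf("name already registered: %s", name)
	}
	backends[name] = f
	return nil
}

func get(name string, options map[string]interface{}) (string, error) {
	if f, ok := backends[name]; ok {
		return f(options)
	}
	return "", errors.New("no backend registered with name: " + name)
}

func main() {
	register("demo", func(options map[string]interface{}) (string, error) {
		realm, _ := options["realm"].(string)
		return "controller for realm " + realm, nil
	})
	c, err := get("demo", map[string]interface{}{"realm": "example.com"})
	fmt.Println(c, err)
}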
-type challenge struct { - realm string - err error -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets the basic challenge header on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { - w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) -} - -func init() { - auth.Register("htpasswd", auth.InitFunc(newAccessController)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go deleted file mode 100644 index 494ad0a7..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go +++ /dev/null @@ -1,80 +0,0 @@ -package htpasswd - -import ( - "bufio" - "fmt" - "io" - "strings" - - "golang.org/x/crypto/bcrypt" -) - -// htpasswd holds a path to a system .htpasswd file and the machinery to parse -// it. Only bcrypt hash entries are supported. -type htpasswd struct { - entries map[string][]byte // maps username to password byte slice. -} - -// newHTPasswd parses the reader and returns an htpasswd or an error. -func newHTPasswd(rd io.Reader) (*htpasswd, error) { - entries, err := parseHTPasswd(rd) - if err != nil { - return nil, err - } - - return &htpasswd{entries: entries}, nil -} - -// AuthenticateUser checks a given user:password credential against the -// receiving HTPasswd's file. If the check passes, nil is returned. -func (htpasswd *htpasswd) authenticateUser(username string, password string) error { - credentials, ok := htpasswd.entries[username] - if !ok { - // timing attack paranoia - bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) - - return ErrAuthenticationFailure - } - - err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) - if err != nil { - return ErrAuthenticationFailure - } - - return nil -} - -// parseHTPasswd parses the contents of htpasswd. This will read all the -// entries in the file, whether or not they are needed. An error is returned -// if an syntax errors are encountered or if the reader fails. -func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { - entries := map[string][]byte{} - scanner := bufio.NewScanner(rd) - var line int - for scanner.Scan() { - line++ // 1-based line numbering - t := strings.TrimSpace(scanner.Text()) - - if len(t) < 1 { - continue - } - - // lines that *begin* with a '#' are considered comments - if t[0] == '#' { - continue - } - - i := strings.Index(t, ":") - if i < 0 || i >= len(t) { - return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) - } - - entries[t[:i]] = []byte(t[i+1:]) - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return entries, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go deleted file mode 100644 index 2b801d94..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package silly provides a simple authentication scheme that checks for the -// existence of an Authorization header and issues access if is present and -// non-empty. -// -// This package is present as an example implementation of a minimal -// auth.AccessController and for testing. 
This is not suitable for any kind of -// production security. -package silly - -import ( - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -// accessController provides a simple implementation of auth.AccessController -// that simply checks for a non-empty Authorization header. It is useful for -// demonstration and testing. -type accessController struct { - realm string - service string -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for silly access controller`) - } - - service, present := options["service"] - if _, ok := service.(string); !present || !ok { - return nil, fmt.Errorf(`"service" must be set for silly access controller`) - } - - return &accessController{realm: realm.(string), service: service.(string)}, nil -} - -// Authorized simply checks for the existence of the authorization header, -// responding with a bearer challenge if it doesn't exist. -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - if req.Header.Get("Authorization") == "" { - challenge := challenge{ - realm: ac.realm, - service: ac.service, - } - - if len(accessRecords) > 0 { - var scopes []string - for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) - } - challenge.scope = strings.Join(scopes, " ") - } - - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil -} - -type challenge struct { - realm string - service string - scope string -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets a simple bearer challenge on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { - header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) - - if ch.scope != "" { - header = fmt.Sprintf("%s,scope=%q", header, ch.scope) - } - - w.Header().Set("WWW-Authenticate", header) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("silly authentication challenge: %#v", ch) -} - -// init registers the silly auth backend. -func init() { - auth.Register("silly", auth.InitFunc(newAccessController)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go deleted file mode 100644 index 5b1ff7ca..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go +++ /dev/null @@ -1,268 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" -) - -// accessSet maps a typed, named resource to -// a set of actions requested or authorized. -type accessSet map[auth.Resource]actionSet - -// newAccessSet constructs an accessSet from -// a variable number of auth.Access items. 
-func newAccessSet(accessItems ...auth.Access) accessSet { - accessSet := make(accessSet, len(accessItems)) - - for _, access := range accessItems { - resource := auth.Resource{ - Type: access.Type, - Name: access.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - set.add(access.Action) - } - - return accessSet -} - -// contains returns whether or not the given access is in this accessSet. -func (s accessSet) contains(access auth.Access) bool { - actionSet, ok := s[access.Resource] - if ok { - return actionSet.contains(access.Action) - } - - return false -} - -// scopeParam returns a collection of scopes which can -// be used for a WWW-Authenticate challenge parameter. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (s accessSet) scopeParam() string { - scopes := make([]string, 0, len(s)) - - for resource, actionSet := range s { - actions := strings.Join(actionSet.keys(), ",") - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) - } - - return strings.Join(scopes, " ") -} - -// Errors used and exported by this package. -var ( - ErrInsufficientScope = errors.New("insufficient scope") - ErrTokenRequired = errors.New("authorization token required") -) - -// authChallenge implements the auth.Challenge interface. -type authChallenge struct { - err error - realm string - service string - accessSet accessSet -} - -var _ auth.Challenge = authChallenge{} - -// Error returns the internal error string for this authChallenge. -func (ac authChallenge) Error() string { - return ac.err.Error() -} - -// Status returns the HTTP Response Status Code for this authChallenge. -func (ac authChallenge) Status() int { - return http.StatusUnauthorized -} - -// challengeParams constructs the value to be used in -// the WWW-Authenticate response challenge header. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) - - if scope := ac.accessSet.scopeParam(); scope != "" { - str = fmt.Sprintf("%s,scope=%q", str, scope) - } - - if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%q", str, "invalid_token") - } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") - } - - return str -} - -// SetChallenge sets the WWW-Authenticate value for the response. -func (ac authChallenge) SetHeaders(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", ac.challengeParams()) -} - -// accessController implements the auth.AccessController interface. -type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey -} - -// tokenAccessOptions is a convenience type for handling -// options to the contstructor of an accessController. -type tokenAccessOptions struct { - realm string - issuer string - service string - rootCertBundle string -} - -// checkOptions gathers the necessary options -// for an accessController from the given map. 
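checkOptions, whose body follows, validates that a fixed set of keys is present in the options map as strings. The same fail-fast validation, generalized into a standalone helper; requireStrings is a hypothetical name for this sketch.

package main

import "fmt"

// requireStrings mirrors checkOptions: every listed key must be present
// in the options map as a string, otherwise configuration fails fast.
func requireStrings(options map[string]interface{}, keys ...string) ([]string, error) {
	vals := make([]string, 0, len(keys))
	for _, key := range keys {
		val, ok := options[key].(string)
		if !ok {
			return nil, fmt.Errorf("auth requires a valid option string: %q", key)
		}
		vals = append(vals, val)
	}
	return vals, nil
}

func main() {
	opts := map[string]interface{}{
		"realm": "registry.example.com", "issuer": "auth.example.com",
		"service": "registry", "rootcertbundle": "/etc/registry/root.pem",
	}
	vals, err := requireStrings(opts, "realm", "issuer", "service", "rootcertbundle")
	fmt.Println(vals, err)
}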
-func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { - var opts tokenAccessOptions - - keys := []string{"realm", "issuer", "service", "rootcertbundle"} - vals := make([]string, 0, len(keys)) - for _, key := range keys { - val, ok := options[key].(string) - if !ok { - return opts, fmt.Errorf("token auth requires a valid option string: %q", key) - } - vals = append(vals, val) - } - - opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] - - return opts, nil -} - -// newAccessController creates an accessController using the given options. -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - config, err := checkOptions(options) - if err != nil { - return nil, err - } - - fp, err := os.Open(config.rootCertBundle) - if err != nil { - return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - defer fp.Close() - - rawCertBundle, err := ioutil.ReadAll(fp) - if err != nil { - return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - - var rootCerts []*x509.Certificate - pemBlock, rawCertBundle := pem.Decode(rawCertBundle) - for pemBlock != nil { - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) - } - - rootCerts = append(rootCerts, cert) - - pemBlock, rawCertBundle = pem.Decode(rawCertBundle) - } - - if len(rootCerts) == 0 { - return nil, errors.New("token auth requires at least one token signing root certificate") - } - - rootPool := x509.NewCertPool() - trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) - if err != nil { - return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) - } - trustedKeys[pubKey.KeyID()] = pubKey - } - - return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, - }, nil -} - -// Authorized handles checking whether the given request is authorized -// for actions on resources described by the given access items. 
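The certificate-bundle loading in newAccessController can be distilled into a helper: call pem.Decode until the input is exhausted, parse each block, and require at least one certificate. A self-contained sketch; poolFromPEM is a hypothetical name.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// poolFromPEM condenses the bundle-loading loop above: decode every PEM
// block, parse each as a certificate, and add it to a pool.
func poolFromPEM(raw []byte) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	n := 0
	for block, rest := pem.Decode(raw); block != nil; block, rest = pem.Decode(rest) {
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to parse certificate: %s", err)
		}
		pool.AddCert(cert)
		n++
	}
	if n == 0 {
		return nil, errors.New("bundle contained no certificates")
	}
	return pool, nil
}

func main() {
	// With no input there are no certificates, so an error is expected.
	if _, err := poolFromPEM(nil); err != nil {
		fmt.Println(err)
	}
}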
-func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), - } - - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - parts := strings.Split(req.Header.Get("Authorization"), " ") - - if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { - challenge.err = ErrTokenRequired - return nil, challenge - } - - rawToken := parts[1] - - token, err := NewToken(rawToken) - if err != nil { - challenge.err = err - return nil, challenge - } - - verifyOpts := VerifyOptions{ - TrustedIssuers: []string{ac.issuer}, - AcceptedAudiences: []string{ac.service}, - Roots: ac.rootCerts, - TrustedKeys: ac.trustedKeys, - } - - if err = token.Verify(verifyOpts); err != nil { - challenge.err = err - return nil, challenge - } - - accessSet := token.accessSet() - for _, access := range accessItems { - if !accessSet.contains(access) { - challenge.err = ErrInsufficientScope - return nil, challenge - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil -} - -// init handles registering the token auth backend. -func init() { - auth.Register("token", auth.InitFunc(newAccessController)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go deleted file mode 100644 index 1d04f104..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go +++ /dev/null @@ -1,35 +0,0 @@ -package token - -// StringSet is a useful type for looking up strings. -type stringSet map[string]struct{} - -// NewStringSet creates a new StringSet with the given strings. -func newStringSet(keys ...string) stringSet { - ss := make(stringSet, len(keys)) - ss.add(keys...) - return ss -} - -// Add inserts the given keys into this StringSet. -func (ss stringSet) add(keys ...string) { - for _, key := range keys { - ss[key] = struct{}{} - } -} - -// Contains returns whether the given key is in this StringSet. -func (ss stringSet) contains(key string) bool { - _, ok := ss[key] - return ok -} - -// Keys returns a slice of all keys in this StringSet. -func (ss stringSet) keys() []string { - keys := make([]string, 0, len(ss)) - - for key := range ss { - keys = append(keys, key) - } - - return keys -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go deleted file mode 100644 index 166816ee..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go +++ /dev/null @@ -1,343 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" - - "github.com/docker/distribution/registry/auth" -) - -const ( - // TokenSeparator is the value which separates the header, claims, and - // signature in the compact serialization of a JSON Web Token. - TokenSeparator = "." -) - -// Errors used by token parsing and verification. -var ( - ErrMalformedToken = errors.New("malformed token") - ErrInvalidToken = errors.New("invalid token") -) - -// ResourceActions stores allowed actions on a named and typed resource. 
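The Authorization header parsing at the start of Authorized above reduces to a two-part split with a case-insensitive scheme check. The same logic in isolation; bearerToken is a hypothetical helper name.

package main

import (
	"fmt"
	"strings"
)

// bearerToken mirrors the header parsing in Authorized: the
// Authorization header must be exactly "Bearer <token>".
func bearerToken(header string) (string, bool) {
	parts := strings.Split(header, " ")
	if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
		return "", false
	}
	return parts[1], true
}

func main() {
	fmt.Println(bearerToken("Bearer abc.def.ghi")) // abc.def.ghi true
	fmt.Println(bearerToken("Basic dXNlcjpwYXNz")) //  false
}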
-type ResourceActions struct { - Type string `json:"type"` - Name string `json:"name"` - Actions []string `json:"actions"` -} - -// ClaimSet describes the main section of a JSON Web Token. -type ClaimSet struct { - // Public claims - Issuer string `json:"iss"` - Subject string `json:"sub"` - Audience string `json:"aud"` - Expiration int64 `json:"exp"` - NotBefore int64 `json:"nbf"` - IssuedAt int64 `json:"iat"` - JWTID string `json:"jti"` - - // Private claims - Access []*ResourceActions `json:"access"` -} - -// Header describes the header section of a JSON Web Token. -type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` -} - -// Token describes a JSON Web Token. -type Token struct { - Raw string - Header *Header - Claims *ClaimSet - Signature []byte -} - -// VerifyOptions is used to specify -// options when verifying a JSON Web Token. -type VerifyOptions struct { - TrustedIssuers []string - AcceptedAudiences []string - Roots *x509.CertPool - TrustedKeys map[string]libtrust.PublicKey -} - -// NewToken parses the given raw token string -// and constructs an unverified JSON Web Token. -func NewToken(rawToken string) (*Token, error) { - parts := strings.Split(rawToken, TokenSeparator) - if len(parts) != 3 { - return nil, ErrMalformedToken - } - - var ( - rawHeader, rawClaims = parts[0], parts[1] - headerJSON, claimsJSON []byte - err error - ) - - defer func() { - if err != nil { - log.Errorf("error while unmarshalling raw token: %s", err) - } - }() - - if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { - err = fmt.Errorf("unable to decode header: %s", err) - return nil, ErrMalformedToken - } - - if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { - err = fmt.Errorf("unable to decode claims: %s", err) - return nil, ErrMalformedToken - } - - token := new(Token) - token.Header = new(Header) - token.Claims = new(ClaimSet) - - token.Raw = strings.Join(parts[:2], TokenSeparator) - if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { - err = fmt.Errorf("unable to decode signature: %s", err) - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(headerJSON, token.Header); err != nil { - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { - return nil, ErrMalformedToken - } - - return token, nil -} - -// Verify attempts to verify this token using the given options. -// Returns a nil error if the token is valid. -func (t *Token) Verify(verifyOpts VerifyOptions) error { - // Verify that the Issuer claim is a trusted authority. - if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { - log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) - return ErrInvalidToken - } - - // Verify that the Audience claim is allowed. - if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { - log.Errorf("token intended for another audience: %q", t.Claims.Audience) - return ErrInvalidToken - } - - // Verify that the token is currently usable and not expired. - currentUnixTime := time.Now().Unix() - if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { - log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) - return ErrInvalidToken - } - - // Verify the token signature. 
- if len(t.Signature) == 0 { - log.Error("token has no signature") - return ErrInvalidToken - } - - // Verify that the signing key is trusted. - signingKey, err := t.VerifySigningKey(verifyOpts) - if err != nil { - log.Error(err) - return ErrInvalidToken - } - - // Finally, verify the signature of the token using the key which signed it. - if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { - log.Errorf("unable to verify token signature: %s", err) - return ErrInvalidToken - } - - return nil -} - -// VerifySigningKey attempts to get the key which was used to sign this token. -// The token header should contain either of these 3 fields: -// `x5c` - The x509 certificate chain for the signing key. Needs to be -// verified. -// `jwk` - The JSON Web Key representation of the signing key. -// May contain its own `x5c` field which needs to be verified. -// `kid` - The unique identifier for the key. This library interprets it -// as a libtrust fingerprint. The key itself can be looked up in -// the trustedKeys field of the given verify options. -// Each of these methods are tried in that order of preference until the -// signing key is found or an error is returned. -func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { - // First attempt to get an x509 certificate chain from the header. - var ( - x5c = t.Header.X5c - rawJWK = t.Header.RawJWK - keyID = t.Header.KeyID - ) - - switch { - case len(x5c) > 0: - signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: - signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) - case len(keyID) > 0: - signingKey = verifyOpts.TrustedKeys[keyID] - if signingKey == nil { - err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) - } - default: - err = errors.New("unable to get token signing key") - } - - return -} - -func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { - if len(x5c) == 0 { - return nil, errors.New("empty x509 certificate chain") - } - - // Ensure the first element is encoded correctly. - leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) - if err != nil { - return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) - } - - // And that it is a valid x509 certificate. - leafCert, err := x509.ParseCertificate(leafCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) - } - - // The rest of the certificate chain are intermediate certificates. - intermediates := x509.NewCertPool() - for i := 1; i < len(x5c); i++ { - intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) - } - - intermediateCert, err := x509.ParseCertificate(intermediateCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) - } - - intermediates.AddCert(intermediateCert) - } - - verifyOpts := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - } - - // TODO: this call returns certificate chains which we ignore for now, but - // we should check them for revocations if we have the ability later. - if _, err = leafCert.Verify(verifyOpts); err != nil { - return nil, fmt.Errorf("unable to verify certificate chain: %s", err) - } - - // Get the public key from the leaf certificate. 
- leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) - if !ok { - return nil, errors.New("unable to get leaf cert public key value") - } - - leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) - if err != nil { - return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) - } - - return -} - -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) - if err != nil { - return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) - } - - // Check to see if the key includes a certificate chain. - x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) - if !ok { - // The JWK should be one of the trusted root keys. - if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { - return nil, errors.New("untrusted JWK with no certificate chain") - } - - // The JWK is one of the trusted keys. - return - } - - // Ensure each item in the chain is of the correct type. - x5c := make([]string, len(x5cVal)) - for i, val := range x5cVal { - certString, ok := val.(string) - if !ok || len(certString) == 0 { - return nil, errors.New("malformed certificate chain") - } - x5c[i] = certString - } - - // Ensure that the x509 certificate chain can - // be verified up to one of our trusted roots. - leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) - if err != nil { - return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) - } - - // Verify that the public key in the leaf cert *is* the signing key. - if pubKey.KeyID() != leafKey.KeyID() { - return nil, errors.New("leaf certificate public key ID does not match JWK key ID") - } - - return -} - -// accessSet returns a set of actions available for the resource -// actions listed in the `access` section of this token. -func (t *Token) accessSet() accessSet { - if t.Claims == nil { - return nil - } - - accessSet := make(accessSet, len(t.Claims.Access)) - - for _, resourceActions := range t.Claims.Access { - resource := auth.Resource{ - Type: resourceActions.Type, - Name: resourceActions.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - for _, action := range resourceActions.Actions { - set.add(action) - } - } - - return accessSet -} - -func (t *Token) compactRaw() string { - return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go deleted file mode 100644 index d7f95be4..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go +++ /dev/null @@ -1,58 +0,0 @@ -package token - -import ( - "encoding/base64" - "errors" - "strings" -) - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -// actionSet is a special type of stringSet. -type actionSet struct { - stringSet -} - -func newActionSet(actions ...string) actionSet { - return actionSet{newStringSet(actions...)} -} - -// contains calls stringSet.contains() for -// either "*" or the given action string. -func (s actionSet) contains(action string) bool { - return s.stringSet.contains("*") || s.stringSet.contains(action) -} - -// contains returns true if q is found in ss. -func contains(ss []string, q string) bool { - for _, s := range ss { - if s == q { - return true - } - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go deleted file mode 100644 index a1ba7f3a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package registry provides the main entrypoints for running a registry. -package registry diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go deleted file mode 100644 index 23225493..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go +++ /dev/null @@ -1,964 +0,0 @@ -package handlers - -import ( - cryptorand "crypto/rand" - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "net/url" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/health/checks" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/proxy" - "github.com/docker/distribution/registry/storage" - memorycache "github.com/docker/distribution/registry/storage/cache/memory" - rediscache "github.com/docker/distribution/registry/storage/cache/redis" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/docker/libtrust" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// randomSecretSize is the number of random bytes to generate if no secret -// was specified. -const randomSecretSize = 32 - -// defaultCheckInterval is the default time in between health checks -const defaultCheckInterval = 10 * time.Second - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected.
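The padding arithmetic in the jose helpers above is easy to get wrong; a small self-contained round trip (illustrative only, not part of the vendored code) shows how the trailing '=' characters are stripped and restored:

```go
package main

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
)

func encode(b []byte) string {
	// Standard base64url, minus any trailing padding.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

func decode(s string) ([]byte, error) {
	switch len(s) % 4 {
	case 0: // no padding was stripped
	case 2:
		s += "==" // two padding characters were stripped
	case 3:
		s += "=" // one padding character was stripped
	default:
		// A length of 1 mod 4 can never come from valid base64.
		return nil, errors.New("illegal base64url string")
	}
	return base64.URLEncoding.DecodeString(s)
}

func main() {
	for _, in := range []string{"a", "ab", "abc", "abcd"} {
		enc := encode([]byte(in))
		dec, _ := decode(enc)
		fmt.Printf("%q -> %q -> %q\n", in, enc, dec)
	}
}
```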
-type App struct { - context.Context - - Config *configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance. - accessController auth.AccessController // main access controller for application - - // httpHost is a parsed representation of the http.host parameter from - // the configuration. Only the Scheme and Host fields are used. - httpHost url.URL - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool - - // trustKey is a deprecated key used to sign manifests converted to - // schema1 for backward compatibility. It should not be used for any - // other purposes. - trustKey libtrust.PrivateKey - - // isCache is true if this registry is configured as a pull through cache - isCache bool - - // readOnly is true if the registry is in a read-only maintenance mode - readOnly bool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { - app := &App{ - Config: configuration, - Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - isCache: configuration.Proxy.RemoteURL != "", - } - - // Register the handler dispatchers. - app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameCatalog, catalogDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, blobDispatcher) - app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) - - var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. - panic(err) - } - - purgeConfig := uploadPurgeDefaultConfig() - if mc, ok := configuration.Storage["maintenance"]; ok { - if v, ok := mc["uploadpurging"]; ok { - purgeConfig, ok = v.(map[interface{}]interface{}) - if !ok { - panic("uploadpurging config key must contain additional keys") - } - } - if v, ok := mc["readonly"]; ok { - readOnly, ok := v.(map[interface{}]interface{}) - if !ok { - panic("readonly config key must contain additional keys") - } - if readOnlyEnabled, ok := readOnly["enabled"]; ok { - app.readOnly, ok = readOnlyEnabled.(bool) - if !ok { - panic("readonly's enabled config key must have a boolean value") - } - } - } - } - - startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) - - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureSecret(configuration) - app.configureEvents(configuration) - app.configureRedis(configuration) - app.configureLogHook(configuration) - - // Generate an ephemeral key to be used for signing converted manifests - // for clients that don't support schema2. 
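The maintenance section parsed earlier in NewApp arrives as nested maps with interface{} keys, which is why the chain of type assertions is needed; a hedged sketch of the same unwrapping, with an assumed map shape mirroring what a YAML decoder hands back:

```go
package main

import "fmt"

// readOnlyEnabled mirrors the nested assertions NewApp performs on
// storage.maintenance.readonly.enabled. Illustrative only.
func readOnlyEnabled(maintenance map[interface{}]interface{}) (bool, error) {
	ro, ok := maintenance["readonly"].(map[interface{}]interface{})
	if !ok {
		return false, nil // no readonly section configured
	}
	enabled, ok := ro["enabled"].(bool)
	if !ok {
		return false, fmt.Errorf("readonly.enabled must be a boolean")
	}
	return enabled, nil
}

func main() {
	mc := map[interface{}]interface{}{
		"readonly": map[interface{}]interface{}{"enabled": true},
	}
	fmt.Println(readOnlyEnabled(mc)) // true <nil>
}
```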
- app.trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - - if configuration.HTTP.Host != "" { - u, err := url.Parse(configuration.HTTP.Host) - if err != nil { - panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) - } - app.httpHost = *u - } - - options := []storage.RegistryOption{} - - if app.isCache { - options = append(options, storage.DisableDigestResumption) - } - - // configure deletion - if d, ok := configuration.Storage["delete"]; ok { - e, ok := d["enabled"] - if ok { - if deleteEnabled, ok := e.(bool); ok && deleteEnabled { - options = append(options, storage.EnableDelete) - } - } - } - - // configure redirects - var redirectDisabled bool - if redirectConfig, ok := configuration.Storage["redirect"]; ok { - v := redirectConfig["disable"] - switch v := v.(type) { - case bool: - redirectDisabled = v - default: - panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) - } - } - if redirectDisabled { - ctxu.GetLogger(app).Infof("backend redirection disabled") - } else { - options = append(options, storage.EnableRedirect) - } - - // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { - v, ok := cc["blobdescriptor"] - if !ok { - // Backwards compatible: "layerinfo" == "blobdescriptor" - v = cc["layerinfo"] - } - - switch v { - case "redis": - if app.redis == nil { - panic("redis configuration required to use for layerinfo cache") - } - cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) - localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) - app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) - if err != nil { - panic("could not create registry: " + err.Error()) - } - ctxu.GetLogger(app).Infof("using redis blob descriptor cache") - case "inmemory": - cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() - localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) - app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) - if err != nil { - panic("could not create registry: " + err.Error()) - } - ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") - default: - if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) 
- if err != nil { - panic("could not create registry: " + err.Error()) - } - } - - app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := configuration.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - ctxu.GetLogger(app).Debugf("configured %q access controller", authType) - } - - // configure as a pull through cache - if configuration.Proxy.RemoteURL != "" { - app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) - if err != nil { - panic(err.Error()) - } - app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) - } - - return app -} - -// RegisterHealthChecks is an awful hack to defer health check registration -// control to callers. This should only ever be called once per registry -// process, typically in a main function. The correct way would be to register -// health checks outside of app, since multiple apps may exist in the same -// process. Because the configuration and app are tightly coupled, -// implementing this properly will require a refactor. This method may panic -// if called twice in the same process. -func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { - if len(healthRegistries) > 1 { - panic("RegisterHealthChecks called with more than one registry") - } - healthRegistry := health.DefaultRegistry - if len(healthRegistries) == 1 { - healthRegistry = healthRegistries[0] - } - - if app.Config.Health.StorageDriver.Enabled { - interval := app.Config.Health.StorageDriver.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - storageDriverCheck := func() error { - _, err := app.driver.List(app, "/") // "/" should always exist - return err // any error will be treated as failure - } - - if app.Config.Health.StorageDriver.Threshold != 0 { - healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) - } else { - healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) - } - } - - for _, fileChecker := range app.Config.Health.FileCheckers { - interval := fileChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) - healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) - } - - for _, httpChecker := range app.Config.Health.HTTPCheckers { - interval := httpChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - statusCode := httpChecker.StatusCode - if statusCode == 0 { - statusCode = 200 - } - - checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) - - if httpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) - healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) - } else {
- ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) - healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) - } - } - - for _, tcpChecker := range app.Config.Health.TCPCheckers { - interval := tcpChecker.Interval - if interval == 0 { - interval = defaultCheckInterval - } - - checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) - - if tcpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) - healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) - } else { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) - healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) - } - } -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. -func (app *App) register(routeName string, dispatch dispatchFunc) { - - // TODO(stevvooe): This odd dispatcher/route registration is by-product of - // some limitations in the gorilla/mux router. We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. - var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. 
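For comparison with the Dial closure being built here, a stripped-down redigo pool with the same shape: dial with timeouts, optional AUTH and SELECT, and a PING on borrow. The addr/password/db values are placeholders, and this is a sketch rather than the vendored implementation:

```go
package main

import (
	"fmt"
	"time"

	"github.com/garyburd/redigo/redis"
)

func newPool(addr, password string, db int) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			conn, err := redis.DialTimeout("tcp", addr,
				5*time.Second, 5*time.Second, 5*time.Second)
			if err != nil {
				return nil, err
			}
			// Authorize the connection, if a password is configured.
			if password != "" {
				if _, err := conn.Do("AUTH", password); err != nil {
					conn.Close()
					return nil, err
				}
			}
			// Select a non-default database, if one is configured.
			if db != 0 {
				if _, err := conn.Do("SELECT", db); err != nil {
					conn.Close()
					return nil, err
				}
			}
			return conn, nil
		},
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			_, err := c.Do("PING")
			return err
		},
	}
}

func main() {
	pool := newPool("localhost:6379", "", 0) // nothing dials until Get()
	fmt.Println("pool configured, MaxIdle =", pool.MaxIdle)
}
```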
- ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. - } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -// configureLogHook prepares logging hook parameters. -func (app *App) configureLogHook(configuration *configuration.Configuration) { - entry, ok := ctxu.GetLogger(app).(*log.Entry) - if !ok { - // somehow, we are not using logrus - return - } - - logger := entry.Logger - - for _, configHook := range configuration.Log.Hooks { - if !configHook.Disabled { - switch configHook.Type { - case "mail": - hook := &logHook{} - hook.LevelsParam = configHook.Levels - hook.Mail = &mailer{ - Addr: configHook.MailOptions.SMTP.Addr, - Username: configHook.MailOptions.SMTP.Username, - Password: configHook.MailOptions.SMTP.Password, - Insecure: configHook.MailOptions.SMTP.Insecure, - From: configHook.MailOptions.From, - To: configHook.MailOptions.To, - } - logger.Hooks.Add(hook) - default: - } - } - } -} - -// configureSecret creates a random secret if a secret wasn't included in the -// configuration. -func (app *App) configureSecret(configuration *configuration.Configuration) { - if configuration.HTTP.Secret == "" { - var secretBytes [randomSecretSize]byte - if _, err := cryptorand.Read(secretBytes[:]); err != nil { - panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) - } - configuration.HTTP.Secret = string(secretBytes[:]) - ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer.
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") - } -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - - defer func() { - status, ok := ctx.Value("http.response.status").(int) - if ok && status >= 200 && status <= 399 { - ctxu.GetResponseLogger(ctx).Infof("response completed") - } - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. - -// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for headerName, headerValues := range app.Config.HTTP.Headers { - for _, value := range headerValues { - w.Header().Add(headerName, value) - } - } - - context := app.context(w, r) - - if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - - if app.nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) - case distribution.ErrRepositoryNameInvalid: - context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) - } - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - - // assign and decorate the authorized repository with an event bridge. 
- context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - // Automated error response handling here. Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). - if context.Errors.Len() > 0 { - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - - app.logError(context, context.Errors) - } - }) -} - -func (app *App) logError(context context.Context, errors errcode.Errors) { - for _, e1 := range errors { - var c ctxu.Context - - switch e1.(type) { - case errcode.Error: - e, _ := e1.(errcode.Error) - c = ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Code.Message()) - c = ctxu.WithValue(c, "err.detail", e.Detail) - case errcode.ErrorCode: - e, _ := e1.(errcode.ErrorCode) - c = ctxu.WithValue(context, "err.code", e) - c = ctxu.WithValue(c, "err.message", e.Message()) - default: - // just normal go 'error' - c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) - c = ctxu.WithValue(c, "err.message", e1.Error()) - } - - c = ctxu.WithLogger(c, ctxu.GetLogger(c, - "err.code", - "err.message", - "err.detail")) - ctxu.GetResponseLogger(c).Errorf("response completed with error") - } -} - -// context constructs the context object for the application. This should only -// be called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - } - - if app.httpHost.Scheme != "" && app.httpHost.Host != "" { - // A "host" item in the configuration takes precedence over - // X-Forwarded-Proto and X-Forwarded-Host headers, and the - // hostname in the request. - context.urlBuilder = v2.NewURLBuilder(&app.httpHost) - } else { - context.urlBuilder = v2.NewURLBuilderFromRequest(r) - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - if fromRepo := r.FormValue("from"); fromRepo != "" { - // mounting a blob from one repository to another requires pull (GET) - // access to the source repository.
- accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) - } - } else { - // Only allow the name not to be set on the base route. - if app.nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed. This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return fmt.Errorf("forbidden: no repository name") - } - accessRecords = appendCatalogAccessRecord(accessRecords, r) - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - // Add the appropriate WWW-Auth header - err.SetHeaders(w) - - if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. -func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - if route == nil { - // No matched route: treat the name as required. Checking for nil - // before calling GetName avoids a nil pointer dereference here. - return true - } - routeName := route.GetName() - return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
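Condensed into a standalone demo, the method-to-action mapping that the function below implements looks like this (illustrative sketch, not the vendored code):

```go
package main

import "fmt"

// actionsFor reproduces the HTTP-method-to-registry-action mapping:
// reads require pull, writes require pull+push, deletes require "*".
func actionsFor(method string) []string {
	switch method {
	case "GET", "HEAD":
		return []string{"pull"}
	case "POST", "PUT", "PATCH":
		return []string{"pull", "push"}
	case "DELETE":
		return []string{"*"}
	}
	return nil
}

func main() {
	for _, m := range []string{"GET", "HEAD", "POST", "PATCH", "DELETE", "OPTIONS"} {
		fmt.Printf("%-7s -> %v\n", m, actionsFor(m))
	}
}
```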
-func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. - records = append(records, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return records -} - -// Add the access record for the catalog if it's our current route -func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { - route := mux.CurrentRoute(r) - routeName := route.GetName() - - if routeName == v2.RouteNameCatalog { - resource := auth.Resource{ - Type: "registry", - Name: "catalog", - } - - accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return accessRecords -} - -// applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { - for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) - if err != nil { - return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) - } - registry = rmw - } - return registry, nil -} - -// applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { - for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) - if err != nil { - return nil, err - } - repository = rmw - } - return repository, nil -} - -// applyStorageMiddleware wraps a storage driver with the configured middlewares -func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { - for _, mw := range middlewares { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) - if err != nil { - return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) - } - driver = smw - } - return driver, nil -} - -// uploadPurgeDefaultConfig provides a default configuration for upload -// purging to be used in the absence of configuration in the -// configuration file -func uploadPurgeDefaultConfig() map[interface{}]interface{} { - config := map[interface{}]interface{}{} - config["enabled"] = true - config["age"] = "168h" - config["interval"] = "24h" - config["dryrun"] = false - return config -} - -func badPurgeUploadConfig(reason string) { - panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) -} - -// startUploadPurger schedules a goroutine which will periodically -// check upload directories for old files and delete them -func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { - if config["enabled"] == false { - return - } - - var purgeAgeDuration time.Duration - var err error - purgeAge, ok := config["age"] - if ok { - ageStr,
ok := purgeAge.(string) - if !ok { - badPurgeUploadConfig("age is not a string") - } - purgeAgeDuration, err = time.ParseDuration(ageStr) - if err != nil { - badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) - } - } else { - badPurgeUploadConfig("age missing") - } - - var intervalDuration time.Duration - interval, ok := config["interval"] - if ok { - intervalStr, ok := interval.(string) - if !ok { - badPurgeUploadConfig("interval is not a string") - } - - intervalDuration, err = time.ParseDuration(intervalStr) - if err != nil { - badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) - } - } else { - badPurgeUploadConfig("interval missing") - } - - var dryRunBool bool - dryRun, ok := config["dryrun"] - if ok { - dryRunBool, ok = dryRun.(bool) - if !ok { - badPurgeUploadConfig("cannot parse dryrun") - } - } else { - badPurgeUploadConfig("dryrun missing") - } - - go func() { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) - log.Infof("Starting upload purge in %s", intervalDuration) - time.Sleep(intervalDuration) - } - }() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go deleted file mode 100644 index 8727a3cd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.4 - -package handlers - -import ( - "net/http" -) - -func basicAuth(r *http.Request) (username, password string, ok bool) { - return r.BasicAuth() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go deleted file mode 100644 index 6cf10a25..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.4 - -package handlers - -import ( - "encoding/base64" - "net/http" - "strings" -) - -// NOTE(stevvooe): This is the basic auth support from go1.4, carried here to -// ensure we can compile on go1.3 and earlier. - -// BasicAuth returns the username and password provided in the request's -// Authorization header, if the request uses HTTP Basic Authentication. -// See RFC 2617, Section 2. -func basicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - return parseBasicAuth(auth) -} - -// parseBasicAuth parses an HTTP Basic Authentication string. -// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
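A quick demonstration of the header format the parser below consumes; on go1.4 and later the same result comes straight from (*http.Request).BasicAuth. The request URL is a placeholder:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	r, _ := http.NewRequest("GET", "http://registry.example.com/v2/", nil)
	r.SetBasicAuth("Aladdin", "open sesame")
	fmt.Println(r.Header.Get("Authorization"))
	// Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==

	// The payload is just "user:password", base64-encoded.
	raw, _ := base64.StdEncoding.DecodeString("QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
	fmt.Println(string(raw)) // Aladdin:open sesame
}
```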
-func parseBasicAuth(auth string) (username, password string, ok bool) { - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go deleted file mode 100644 index fb250acd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go +++ /dev/null @@ -1,99 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// blobDispatcher uses the request context to build a blobHandler. -func blobDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - // Whether the digest is missing (errDigestNotAvailable) or malformed, - // the client sees the same digest-invalid error. - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - }) - } - - blobHandler := &blobHandler{ - Context: ctx, - Digest: dgst, - } - - mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(blobHandler.GetBlob), - "HEAD": http.HandlerFunc(blobHandler.GetBlob), - } - - if !ctx.readOnly { - mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) - } - - return mhandler -} - -// blobHandler serves http blob requests. -type blobHandler struct { - *Context - - Digest digest.Digest -} - -// GetBlob fetches the binary data from backend storage and returns it in the -// response.
-func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("GetBlob") - blobs := bh.Repository.Blobs(bh) - desc, err := blobs.Stat(bh, bh.Digest) - if err != nil { - if err == distribution.ErrBlobUnknown { - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) - } else { - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { - context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// DeleteBlob deletes a layer blob -func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("DeleteBlob") - - blobs := bh.Repository.Blobs(bh) - err := blobs.Delete(bh, bh.Digest) - if err != nil { - switch err { - case distribution.ErrUnsupported: - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) - return - case distribution.ErrBlobUnknown: - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) - return - default: - bh.Errors = append(bh.Errors, err) - context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) - return - } - } - - w.Header().Set("Content-Length", "0") - w.WriteHeader(http.StatusAccepted) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go deleted file mode 100644 index 1e3bff95..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go +++ /dev/null @@ -1,385 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "net/url" - "os" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" - "github.com/gorilla/handlers" -) - -// blobUploadDispatcher constructs and returns the blob upload handler for the -// given request context. 
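Both the blob and upload dispatchers rely on gorilla's handlers.MethodHandler, which is simply a map from HTTP method to handler that answers 405 Method Not Allowed for anything unregistered; a compact demonstration (the test server and handler body are made up):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/handlers"
)

func main() {
	// Only GET is registered, mirroring a read-only registry where the
	// mutating verbs are never added to the map.
	h := handlers.MethodHandler{
		"GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprint(w, "blob bytes")
		}),
	}

	srv := httptest.NewServer(h)
	defer srv.Close()

	get, _ := http.Get(srv.URL)
	post, _ := http.Post(srv.URL, "application/octet-stream", nil)
	fmt.Println(get.StatusCode, post.StatusCode) // 200 405
}
```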
-func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { - buh := &blobUploadHandler{ - Context: ctx, - UUID: getUploadUUID(ctx), - } - - handler := handlers.MethodHandler{ - "GET": http.HandlerFunc(buh.GetUploadStatus), - "HEAD": http.HandlerFunc(buh.GetUploadStatus), - } - - if !ctx.readOnly { - handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) - handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) - handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) - handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) - } - - if buh.UUID != "" { - state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) - if err != nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - }) - } - buh.State = state - - if state.Name != ctx.Repository.Name() { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail("mismatched repository name in upload state")) - }) - } - - if state.UUID != buh.UUID { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail("mismatched uuid in upload state")) - }) - } - - blobs := ctx.Repository.Blobs(buh) - upload, err := blobs.Resume(buh, buh.UUID) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) - if err == distribution.ErrBlobUploadUnknown { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - }) - } - buh.Upload = upload - - if state.Offset > 0 { - // Seek the blob upload to the correct spot if it's non-zero. - // These error conditions should be rare and demonstrate real - // problems. We basically cancel the upload and tell the client to - // start over. - if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } else if nn != buh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } - } - - return closeResources(handler, buh.Upload) - } - - return handler -} - -// blobUploadHandler handles the http blob upload process. -type blobUploadHandler struct { - *Context - - // UUID identifies the upload instance for the current request. Using UUID - // to key blob writers since this implementation uses UUIDs.
- UUID string - - Upload distribution.BlobWriter - - State blobUploadState -} - -// StartBlobUpload begins the blob upload process and allocates a server-side -// blob writer session, optionally mounting the blob from a separate repository. -func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { - var options []distribution.BlobCreateOption - - fromRepo := r.FormValue("from") - mountDigest := r.FormValue("mount") - - if mountDigest != "" && fromRepo != "" { - opt, err := buh.createBlobMountOption(fromRepo, mountDigest) - if opt != nil && err == nil { - options = append(options, opt) - } - } - - blobs := buh.Repository.Blobs(buh) - upload, err := blobs.Create(buh, options...) - - if err != nil { - if ebm, ok := err.(distribution.ErrBlobMounted); ok { - if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - } else if err == distribution.ErrUnsupported { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) - } else { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - buh.Upload = upload - defer buh.Upload.Close() - - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) - w.WriteHeader(http.StatusAccepted) -} - -// GetUploadStatus returns the status of a given upload, identified by id. -func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - // TODO(dmcgowan): Set last argument to false in blobUploadResponse when - // resumable upload is supported. This will enable returning a non-zero - // range for clients to begin uploading at an offset. - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.WriteHeader(http.StatusNoContent) -} - -// PatchBlobData writes data to an upload. -func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - ct := r.Header.Get("Content-Type") - if ct != "" && ct != "application/octet-stream" { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) - // TODO(dmcgowan): encode error - return - } - - // TODO(dmcgowan): support Content-Range header to seek and write range - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - if err := buh.blobUploadResponse(w, r, false); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.WriteHeader(http.StatusAccepted) -} - -// PutBlobUploadComplete takes the final request of a blob upload. The -// request may include all the blob data or no blob data. Any data -// provided is received and verified. If successful, the blob is linked -// into the blob store and 201 Created is returned with the canonical -// url of the blob. 
-func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) - return - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - // invalid digest? return error, but allow retry. - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) - return - } - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ - Digest: dgst, - - // TODO(stevvooe): This isn't wildly important yet, but we should - // really set the length and mediatype. For now, we can let the - // backend take care of this. - }) - - if err != nil { - switch err := err.(type) { - case distribution.ErrBlobInvalidDigest: - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - default: - switch err { - case distribution.ErrUnsupported: - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) - case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - default: - ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - } - - // Clean up the backend blob data if there was an error. - if err := buh.Upload.Cancel(buh); err != nil { - // If the cleanup fails, all we can do is observe and report. - ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) - } - - return - } - if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// CancelBlobUpload cancels an in-progress upload of a blob. -func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - if err := buh.Upload.Cancel(buh); err != nil { - ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - w.WriteHeader(http.StatusNoContent) -} - -// blobUploadResponse provides a standard response for blob upload and -// chunk requests. This sets the correct headers but the response status is -// left to the caller. The fresh argument is used to ensure that new blob -// uploads always start at a 0 offset. This allows disabling resumable push by -// always returning a 0 offset on check status. -func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - - var offset int64 - if !fresh { - var err error - offset, err = buh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(buh).Errorf("unable to get current offset of blob upload: %v", err) - return err - } - } - - // TODO(stevvooe): Need a better way to manage the upload state automatically.
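The Range header assembled just below is an inclusive byte range, while the offset is a count of bytes received, which forces a small off-by-one adjustment; a toy reproduction of that arithmetic:

```go
package main

import "fmt"

// rangeHeader reproduces the inclusive-range computation: offset counts
// bytes received, the header wants the index of the last received byte.
func rangeHeader(offset int64) string {
	end := offset
	if end > 0 {
		end--
	}
	return fmt.Sprintf("0-%d", end)
}

func main() {
	for _, off := range []int64{0, 1, 1024} {
		fmt.Printf("offset=%4d -> Range: %s\n", off, rangeHeader(off))
	}
}
```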
- buh.State.Name = buh.Repository.Name() - buh.State.UUID = buh.Upload.ID() - buh.State.Offset = offset - buh.State.StartedAt = buh.Upload.StartedAt() - - token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) - return err - } - - uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( - buh.Repository.Name(), buh.Upload.ID(), - url.Values{ - "_state": []string{token}, - }) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload url: %s", err) - return err - } - - endRange := offset - if endRange > 0 { - endRange = endRange - 1 - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.Header().Set("Location", uploadURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) - - return nil -} - -// createBlobMountOption builds the blob create option used to mount a blob -// from another repository by its digest. If the mount succeeds, the blob is -// linked into the blob store and 201 Created is returned with the canonical -// url of the blob. -func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { - dgst, err := digest.ParseDigest(mountDigest) - if err != nil { - return nil, err - } - - ref, err := reference.ParseNamed(fromRepo) - if err != nil { - return nil, err - } - - canonical, err := reference.WithDigest(ref, dgst) - if err != nil { - return nil, err - } - - return storage.WithMountFrom(canonical), nil -} - -// writeBlobCreatedHeaders writes the standard headers describing a newly -// created blob. A 201 Created is written as well as the canonical URL and -// blob digest. -func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { - blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) - if err != nil { - return err - } - - w.Header().Set("Location", blobURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - w.WriteHeader(http.StatusCreated) - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go deleted file mode 100644 index 6ec1fe55..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go +++ /dev/null @@ -1,95 +0,0 @@ -package handlers - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/gorilla/handlers" -) - -const maximumReturnedEntries = 100 - -func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { - catalogHandler := &catalogHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(catalogHandler.GetCatalog), - } -} - -type catalogHandler struct { - *Context -} - -type catalogAPIResponse struct { - Repositories []string `json:"repositories"` -} - -func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { - var moreEntries = true - - q := r.URL.Query() - lastEntry := q.Get("last") - maxEntries, err := strconv.Atoi(q.Get("n")) - if err != nil || maxEntries < 0 { - maxEntries = maximumReturnedEntries - } - - repos := make([]string, maxEntries) - - filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) - if err == io.EOF { - moreEntries = false - } else if err != nil { - ch.Errors = append(ch.Errors,
errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - // Add a link header if there are more entries to retrieve - if moreEntries { - lastEntry = repos[len(repos)-1] - urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) - if err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - w.Header().Set("Link", urlStr) - } - - enc := json.NewEncoder(w) - if err := enc.Encode(catalogAPIResponse{ - Repositories: repos[0:filled], - }); err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// Use the original URL from the request to create a new URL for -// the link header -func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { - calledURL, err := url.Parse(origURL) - if err != nil { - return "", err - } - - v := url.Values{} - v.Add("n", strconv.Itoa(maxEntries)) - v.Add("last", lastEntry) - - calledURL.RawQuery = v.Encode() - - calledURL.Fragment = "" - urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) - - return urlStr, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go deleted file mode 100644 index 85a17123..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go +++ /dev/null @@ -1,151 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "sync" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "golang.org/x/net/context" -) - -// Context should contain the request specific context for use across -// handlers. Resources that don't need to be shared across handlers should not -// be on this object. -type Context struct { - // App points to the application structure that created this context. - *App - context.Context - - // Repository is the repository for the current request. All requests - // should be scoped to a single repository. This field may be nil. - Repository distribution.Repository - - // Errors is a collection of errors encountered during the request to be - // returned to the client API. If errors are added to the collection, the - // handler *must not* start the response via http.ResponseWriter. - Errors errcode.Errors - - urlBuilder *v2.URLBuilder - - // TODO(stevvooe): The goal is to completely factor this context and - // dispatching out of the web application. Ideally, we should lean on - // context.Context for injection of these resources. -} - -// Value overrides context.Context.Value to ensure that calls are routed to -// the correct context.
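A worked example of the pagination Link header produced by createLinkEntry above; the registry host is a placeholder:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// linkEntry rebuilds the request URL with fresh n/last parameters and
// wraps it in an RFC 5988 Link value, as createLinkEntry does.
func linkEntry(origURL string, maxEntries int, lastEntry string) (string, error) {
	u, err := url.Parse(origURL)
	if err != nil {
		return "", err
	}
	v := url.Values{}
	v.Add("n", strconv.Itoa(maxEntries))
	v.Add("last", lastEntry)
	u.RawQuery = v.Encode()
	u.Fragment = ""
	return fmt.Sprintf("<%s>; rel=\"next\"", u.String()), nil
}

func main() {
	l, _ := linkEntry("https://registry.example.com/v2/_catalog?n=2", 2, "library/busybox")
	fmt.Println(l)
	// <https://registry.example.com/v2/_catalog?last=library%2Fbusybox&n=2>; rel="next"
}
```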
-func (ctx *Context) Value(key interface{}) interface{} { - return ctx.Context.Value(key) -} - -func getName(ctx context.Context) (name string) { - return ctxu.GetStringValue(ctx, "vars.name") -} - -func getReference(ctx context.Context) (reference string) { - return ctxu.GetStringValue(ctx, "vars.reference") -} - -var errDigestNotAvailable = fmt.Errorf("digest not available in context") - -func getDigest(ctx context.Context) (dgst digest.Digest, err error) { - dgstStr := ctxu.GetStringValue(ctx, "vars.digest") - - if dgstStr == "" { - ctxu.GetLogger(ctx).Errorf("digest not available") - return "", errDigestNotAvailable - } - - d, err := digest.ParseDigest(dgstStr) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) - return "", err - } - - return d, nil -} - -func getUploadUUID(ctx context.Context) (uuid string) { - return ctxu.GetStringValue(ctx, "vars.uuid") -} - -// getUserName attempts to resolve a username from the context and request. If -// a username cannot be resolved, the empty string is returned. -func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, "auth.user.name") - - // Fallback to request user with basic auth - if username == "" { - var ok bool - uname, _, ok := basicAuth(r) - if ok { - username = uname - } - } - - return username -} - -// contextManager allows us to associate net/context.Context instances with a -// request, based on the memory identity of http.Request. This prepares http- -// level context, which is not application specific. If this is called, -// (*contextManager).release must be called on the context when the request is -// completed. -// -// Providing this circumvents a lot of necessity for dispatchers with the -// benefit of instantiating the request context much earlier. -// -// TODO(stevvooe): Consider making this facility a part of the context package. -type contextManager struct { - contexts map[*http.Request]context.Context - mu sync.Mutex -} - -// defaultContextManager is just a global instance to register request contexts. -var defaultContextManager = newContextManager() - -func newContextManager() *contextManager { - return &contextManager{ - contexts: make(map[*http.Request]context.Context), - } -} - -// context either returns a new context or looks it up in the manager. -func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { - cm.mu.Lock() - defer cm.mu.Unlock() - - ctx, ok := cm.contexts[r] - if ok { - return ctx - } - - if parent == nil { - parent = ctxu.Background() - } - - ctx = ctxu.WithRequest(parent, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) - cm.contexts[r] = ctx - - return ctx -} - -// releases frees any associated with resources from request. 
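The acquire/release contract of the contextManager above is easy to get wrong. A minimal sketch of the intended pairing, assuming it lives alongside this code in the handlers package; the wrapper name `withRequestContext` is hypothetical:

```go
package handlers

import (
	"net/http"

	"golang.org/x/net/context"
)

// withRequestContext brackets a request with the contextManager above:
// acquire the http-level context on entry, release the map entry on exit.
func withRequestContext(parent context.Context, h func(context.Context, http.ResponseWriter, *http.Request)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Instantiate (or look up) the context keyed by this *http.Request.
		ctx := defaultContextManager.context(parent, w, r)
		// Per the contract above, release must run when the request
		// completes so the map entry does not leak.
		defer defaultContextManager.release(ctx)

		h(ctx, w, r)
	})
}
```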
-func (cm *contextManager) release(ctx context.Context) { - cm.mu.Lock() - defer cm.mu.Unlock() - - r, err := ctxu.GetRequest(ctx) - if err != nil { - ctxu.GetLogger(ctx).Errorf("no request found in context during release") - return - } - delete(cm.contexts, r) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go deleted file mode 100644 index 5a3c9984..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go +++ /dev/null @@ -1,62 +0,0 @@ -package handlers - -import ( - "errors" - "io" - "net/http" - - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/errcode" -) - -// closeResources closes all the provided resources after running the target -// handler. -func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for _, closer := range closers { - defer closer.Close() - } - handler.ServeHTTP(w, r) - }) -} - -// copyFullPayload copies the payload of a HTTP request to destWriter. If it -// receives less content than expected, and the client disconnected during the -// upload, it avoids sending a 400 error to keep the logs cleaner. -func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { - // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := responseWriter.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) - } - - // Read in the data, if any. - copied, err := io.Copy(destWriter, r.Body) - if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client - // disconnect during the request? If so, avoid returning a 400 - // error to keep the logs cleaner. - select { - case <-clientClosed: - // Set the response code to "499 Client Closed Request" - // Even though the connection has already been closed, - // this causes the logger to pick up a 499 error - // instead of showing 0 for the HTTP status. - responseWriter.WriteHeader(499) - - ctxu.GetLogger(context).Error("client disconnected during " + action) - return errors.New("client disconnected") - default: - } - } - - if err != nil { - ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) - *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go deleted file mode 100644 index 1725d240..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go +++ /dev/null @@ -1,72 +0,0 @@ -package handlers - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "time" -) - -// blobUploadState captures the state serializable state of the blob upload. -type blobUploadState struct { - // name is the primary repository under which the blob will be linked. - Name string - - // UUID identifies the upload. 
- UUID string - - // offset contains the current progress of the upload. - Offset int64 - - // StartedAt is the original start time of the upload. - StartedAt time.Time -} - -type hmacKey string - -// unpackUploadState unpacks and validates the blob upload state from the -// token, using the hmacKey secret. -func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { - var state blobUploadState - - tokenBytes, err := base64.URLEncoding.DecodeString(token) - if err != nil { - return state, err - } - mac := hmac.New(sha256.New, []byte(secret)) - - if len(tokenBytes) < mac.Size() { - return state, fmt.Errorf("Invalid token") - } - - macBytes := tokenBytes[:mac.Size()] - messageBytes := tokenBytes[mac.Size():] - - mac.Write(messageBytes) - if !hmac.Equal(mac.Sum(nil), macBytes) { - return state, fmt.Errorf("Invalid token") - } - - if err := json.Unmarshal(messageBytes, &state); err != nil { - return state, err - } - - return state, nil -} - -// packUploadState packs the upload state signed with and hmac digest using -// the hmacKey secret, encoding to url safe base64. The resulting token can be -// used to share data with minimized risk of external tampering. -func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { - mac := hmac.New(sha256.New, []byte(secret)) - p, err := json.Marshal(lus) - if err != nil { - return "", err - } - - mac.Write(p) - - return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go deleted file mode 100644 index 7bbab4f8..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "bytes" - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Sirupsen/logrus" -) - -// logHook is for hooking Panic in web application -type logHook struct { - LevelsParam []string - Mail *mailer -} - -// Fire forwards an error to LogHook -func (hook *logHook) Fire(entry *logrus.Entry) error { - addr := strings.Split(hook.Mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) - - html := ` - {{.Message}} - - {{range $key, $value := .Data}} - {{$key}}: {{$value}} - {{end}} - ` - b := bytes.NewBuffer(make([]byte, 0)) - t := template.Must(template.New("mail body").Parse(html)) - if err := t.Execute(b, entry); err != nil { - return err - } - body := fmt.Sprintf("%s", b) - - return hook.Mail.sendMail(subject, body) -} - -// Levels contains hook levels to be catched -func (hook *logHook) Levels() []logrus.Level { - levels := []logrus.Level{} - for _, v := range hook.LevelsParam { - lv, _ := logrus.ParseLevel(v) - levels = append(levels, lv) - } - return levels -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go deleted file mode 100644 index 51156d3b..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go +++ /dev/null @@ -1,338 +0,0 @@ -package handlers - -import ( - "bytes" - "fmt" - "net/http" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - 
"github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// These constants determine which architecture and OS to choose from a -// manifest list when downconverting it to a schema1 manifest. -const ( - defaultArch = "amd64" - defaultOS = "linux" -) - -// imageManifestDispatcher takes the request context and builds the -// appropriate handler for handling image manifest requests. -func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { - imageManifestHandler := &imageManifestHandler{ - Context: ctx, - } - reference := getReference(ctx) - dgst, err := digest.ParseDigest(reference) - if err != nil { - // We just have a tag - imageManifestHandler.Tag = reference - } else { - imageManifestHandler.Digest = dgst - } - - mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), - } - - if !ctx.readOnly { - mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) - mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) - } - - return mhandler -} - -// imageManifestHandler handles http operations on image manifests. -type imageManifestHandler struct { - *Context - - // One of tag or digest gets set, depending on what is present in context. - Tag string - Digest digest.Digest -} - -// GetImageManifest fetches the image manifest from the storage backend, if it exists. -func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var manifest distribution.Manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - desc, err := tags.Get(imh, imh.Tag) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - imh.Digest = desc.Digest - } - - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - manifest, err = manifests.Get(imh, imh.Digest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - supportsSchema2 := false - supportsManifestList := false - if acceptHeaders, ok := r.Header["Accept"]; ok { - for _, mediaType := range acceptHeaders { - if mediaType == schema2.MediaTypeManifest { - supportsSchema2 = true - } - if mediaType == manifestlist.MediaTypeManifestList { - supportsManifestList = true - } - } - } - - schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) - manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - - // Only rewrite schema2 manifests when they are being fetched by tag. - // If they are being fetched by digest, we can't return something not - // matching the digest. 
- if imh.Tag != "" && isSchema2 && !supportsSchema2 { - // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) - - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } else if imh.Tag != "" && isManifestList && !supportsManifestList { - // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) - - // Find the image manifest corresponding to the default - // platform - var manifestDigest digest.Digest - for _, manifestDescriptor := range manifestList.Manifests { - if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { - manifestDigest = manifestDescriptor.Digest - break - } - } - - if manifestDigest == "" { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - return - } - - manifest, err = manifests.Get(imh, manifestDigest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - // If necessary, convert the image manifest - if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } - } - - ct, p, err := manifest.Payload() - if err != nil { - return - } - - w.Header().Set("Content-Type", ct) - w.Header().Set("Content-Length", fmt.Sprint(len(p))) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(p) -} - -func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { - targetDescriptor := schema2Manifest.Target() - blobs := imh.Repository.Blobs(imh) - configJSON, err := blobs.Get(imh, targetDescriptor.Digest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - - builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) - for _, d := range schema2Manifest.References() { - if err := builder.AppendReference(d); err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - } - manifest, err := builder.Build(imh) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return nil, err - } - - return manifest, nil -} - -func etagMatch(r *http.Request, etag string) bool { - for _, headerVal := range r.Header["If-None-Match"] { - if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted - return true - } - } - return false -} - -// PutImageManifest validates and stores an image in the registry. 
-func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("PutImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var jsonBuf bytes.Buffer - if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - mediaType := r.Header.Get("Content-Type") - manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } - - if imh.Digest != "" { - if desc.Digest != imh.Digest { - ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest) - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - } - } else if imh.Tag != "" { - imh.Digest = desc.Digest - } else { - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) - return - } - - _, err = manifests.Put(imh, manifest) - if err != nil { - // TODO(stevvooe): These error handling switches really need to be - // handled by an app global mapper. - if err == distribution.ErrUnsupported { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) - return - } - switch err := err.(type) { - case distribution.ErrManifestVerification: - for _, verificationError := range err { - switch verificationError := verificationError.(type) { - case distribution.ErrManifestBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) - case distribution.ErrManifestNameInvalid: - imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) - case distribution.ErrManifestUnverified: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) - default: - if verificationError == digest.ErrDigestInvalidFormat { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) - } - } - } - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - return - } - - // Tag this manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - err = tags.Tag(imh, imh.Tag, desc) - if err != nil { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - } - - // Construct a canonical url for the uploaded manifest. - location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) - if err != nil { - // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to - // happen. We'll log the error here but proceed as if it worked. Worst - // case, we set an empty location header. - ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) - } - - w.Header().Set("Location", location) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusCreated) -} - -// DeleteImageManifest removes the manifest with the given digest from the registry. 
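One detail of the PUT path above worth spelling out before the delete handler below: for schema2 manifests, the "payload digest" compared against the digest in the request URL is simply a sha256 over the exact manifest bytes as received (schema1 digests are instead taken over the canonical signed payload). A standard-library sketch of that computation:

```go
package client

import (
	"crypto/sha256"
	"encoding/hex"
)

// payloadDigest computes the value the handler above compares against
// the URL digest for a schema2 manifest body.
func payloadDigest(p []byte) string {
	sum := sha256.Sum256(p)
	return "sha256:" + hex.EncodeToString(sum[:])
}
```

A PUT to `/v2/<name>/manifests/<digest>` whose body hashes to anything else is rejected with `ErrorCodeDigestInvalid`, per the mismatch branch above.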
-func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("DeleteImageManifest") - - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - err = manifests.Delete(imh, imh.Digest) - if err != nil { - switch err { - case digest.ErrDigestUnsupported: - case digest.ErrDigestInvalidFormat: - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - case distribution.ErrBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - return - case distribution.ErrUnsupported: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) - return - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) - return - } - } - - tagService := imh.Repository.Tags(imh) - referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - for _, tag := range referencedTags { - if err := tagService.Untag(imh, tag); err != nil { - imh.Errors = append(imh.Errors, err) - return - } - } - - w.WriteHeader(http.StatusAccepted) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go deleted file mode 100644 index 39244909..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go +++ /dev/null @@ -1,45 +0,0 @@ -package handlers - -import ( - "errors" - "net/smtp" - "strings" -) - -// mailer provides fields of email configuration for sending. -type mailer struct { - Addr, Username, Password, From string - Insecure bool - To []string -} - -// sendMail allows users to send email, only if mail parameters is configured correctly. -func (mail *mailer) sendMail(subject, message string) error { - addr := strings.Split(mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - msg := []byte("To:" + strings.Join(mail.To, ";") + - "\r\nFrom: " + mail.From + - "\r\nSubject: " + subject + - "\r\nContent-Type: text/plain\r\n\r\n" + - message) - auth := smtp.PlainAuth( - "", - mail.Username, - mail.Password, - host, - ) - err := smtp.SendMail( - mail.Addr, - auth, - mail.From, - mail.To, - []byte(msg), - ) - if err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go deleted file mode 100644 index d9f0106c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go +++ /dev/null @@ -1,60 +0,0 @@ -package handlers - -import ( - "encoding/json" - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// tagsDispatcher constructs the tags handler api endpoint. -func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { - tagsHandler := &tagsHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(tagsHandler.GetTags), - } -} - -// tagsHandler handles requests for lists of tags under a repository name. 
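Taken together, the manifest handlers above imply a two-step client delete flow, sketched here before the tags handler below: deletion is by digest only, so a tag must first be resolved via a HEAD request and the `Docker-Content-Digest` header, and success is signalled by 202 Accepted. Registry address and names are illustrative:

```go
package client

import (
	"fmt"
	"net/http"
)

// deleteByTag resolves a tag to its digest, then deletes the manifest,
// mirroring the contract of the handlers above.
func deleteByTag(base, name, tag string) error {
	head, err := http.NewRequest("HEAD", fmt.Sprintf("%s/v2/%s/manifests/%s", base, name, tag), nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(head)
	if err != nil {
		return err
	}
	resp.Body.Close()
	dgst := resp.Header.Get("Docker-Content-Digest")
	if dgst == "" {
		return fmt.Errorf("no digest returned for %s:%s", name, tag)
	}

	del, err := http.NewRequest("DELETE", fmt.Sprintf("%s/v2/%s/manifests/%s", base, name, dgst), nil)
	if err != nil {
		return err
	}
	resp, err = http.DefaultClient.Do(del)
	if err != nil {
		return err
	}
	resp.Body.Close()
	// DeleteImageManifest above answers 202 and then untags every tag
	// that referenced the digest.
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}
```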
-type tagsHandler struct { - *Context -} - -type tagsAPIResponse struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// GetTags returns a json list of tags for a specific image name. -func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - tagService := th.Repository.Tags(th) - tags, err := tagService.All(th) - if err != nil { - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) - default: - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - enc := json.NewEncoder(w) - if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name(), - Tags: tags, - }); err != nil { - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go deleted file mode 100644 index b93a7a63..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go +++ /dev/null @@ -1,74 +0,0 @@ -package listener - -import ( - "fmt" - "net" - "os" - "time" -) - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -// it is a plain copy-paste from net/http/server.go -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -// NewListener announces on laddr and net. Accepted values of the net are -// 'unix' and 'tcp' -func NewListener(net, laddr string) (net.Listener, error) { - switch net { - case "unix": - return newUnixListener(laddr) - case "tcp", "": // an empty net means tcp - return newTCPListener(laddr) - default: - return nil, fmt.Errorf("unknown address type %s", net) - } -} - -func newUnixListener(laddr string) (net.Listener, error) { - fi, err := os.Stat(laddr) - if err == nil { - // the file exists. - // try to remove it if it's a socket - if !isSocket(fi.Mode()) { - return nil, fmt.Errorf("file %s exists and is not a socket", laddr) - } - - if err := os.Remove(laddr); err != nil { - return nil, err - } - } else if !os.IsNotExist(err) { - // we can't do stat on the file. 
- // it means we can not remove it - return nil, err - } - - return net.Listen("unix", laddr) -} - -func isSocket(m os.FileMode) bool { - return m&os.ModeSocket != 0 -} - -func newTCPListener(laddr string) (net.Listener, error) { - ln, err := net.Listen("tcp", laddr) - if err != nil { - return nil, err - } - - return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go deleted file mode 100644 index 7535c6db..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go +++ /dev/null @@ -1,40 +0,0 @@ -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// InitFunc is the type of a RegistryMiddleware factory function and is -// used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RegistryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RegistryMiddleware with the given options using the named backend. -func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(ctx, registry, options) - } - } - - return nil, fmt.Errorf("no registry middleware registered with name: %s", name) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go deleted file mode 100644 index 27b42aec..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go +++ /dev/null @@ -1,40 +0,0 @@ -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// InitFunc is the type of a RepositoryMiddleware factory function and is -// used to register the constructor for different RepositoryMiddleware backends. -type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RepositoryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RepositoryMiddleware with the given options using the named backend. 
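Both middleware packages follow the same Register/Get pattern. A minimal sketch of the registration side for the repository variant whose `Get` follows below; the "noop" name is an example, not something the registry ships:

```go
package middleware

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

func init() {
	// Register a pass-through repository middleware under an example
	// name. A real middleware would return a wrapped Repository that
	// decorates Manifests, Blobs, or Tags.
	if err := Register("noop", func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
		return repository, nil
	}); err != nil {
		panic(err)
	}
}
```

The registry later resolves the configured name through `Get`, passing the options map from its configuration; an unknown name fails with the "no ... middleware registered" error above.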
-func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(ctx, repository, options) - } - } - - return nil, fmt.Errorf("no repository middleware registered with name: %s", name) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go deleted file mode 100644 index e4bec75a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go +++ /dev/null @@ -1,54 +0,0 @@ -package proxy - -import ( - "net/http" - "net/url" - - "github.com/docker/distribution/registry/client/auth" -) - -const tokenURL = "https://auth.docker.io/token" - -type userpass struct { - username string - password string -} - -type credentials struct { - creds map[string]userpass -} - -func (c credentials) Basic(u *url.URL) (string, string) { - up := c.creds[u.String()] - - return up.username, up.password -} - -// ConfigureAuth authorizes with the upstream registry -func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { - if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { - return nil, err - } - - creds := map[string]userpass{ - tokenURL: { - username: username, - password: password, - }, - } - return credentials{creds: creds}, nil -} - -func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { - resp, err := http.Get(endpoint) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := manager.AddResponse(resp); err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go deleted file mode 100644 index 41b76e8e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go +++ /dev/null @@ -1,186 +0,0 @@ -package proxy - -import ( - "io" - "net/http" - "strconv" - "sync" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/proxy/scheduler" -) - -// todo(richardscothern): from cache control header or config file -const blobTTL = time.Duration(24 * 7 * time.Hour) - -type proxyBlobStore struct { - localStore distribution.BlobStore - remoteStore distribution.BlobService - scheduler *scheduler.TTLExpirationScheduler -} - -var _ distribution.BlobStore = &proxyBlobStore{} - -// inflight tracks currently downloading blobs -var inflight = make(map[digest.Digest]struct{}) - -// mu protects inflight -var mu sync.Mutex - -func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { - w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) - w.Header().Set("Content-Type", mediaType) - w.Header().Set("Docker-Content-Digest", digest.String()) - w.Header().Set("Etag", digest.String()) -} - -func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { - desc, err := pbs.remoteStore.Stat(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - if w, ok := writer.(http.ResponseWriter); ok { - 
setResponseHeaders(w, desc.Size, desc.MediaType, dgst) - } - - remoteReader, err := pbs.remoteStore.Open(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - _, err = io.CopyN(writer, remoteReader, desc.Size) - if err != nil { - return distribution.Descriptor{}, err - } - - proxyMetrics.BlobPush(uint64(desc.Size)) - - return desc, nil -} - -func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { - localDesc, err := pbs.localStore.Stat(ctx, dgst) - if err != nil { - // Stat can report a zero sized file here if it's checked between creation - // and population. Return nil error, and continue - return false, nil - } - - if err == nil { - proxyMetrics.BlobPush(uint64(localDesc.Size)) - return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) - } - - return false, nil - -} - -func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { - defer func() { - mu.Lock() - delete(inflight, dgst) - mu.Unlock() - }() - - var desc distribution.Descriptor - var err error - var bw distribution.BlobWriter - - bw, err = pbs.localStore.Create(ctx) - if err != nil { - return err - } - - desc, err = pbs.copyContent(ctx, dgst, bw) - if err != nil { - return err - } - - _, err = bw.Commit(ctx, desc) - if err != nil { - return err - } - - return nil -} - -func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - served, err := pbs.serveLocal(ctx, w, r, dgst) - if err != nil { - context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) - return err - } - - if served { - return nil - } - - mu.Lock() - _, ok := inflight[dgst] - if ok { - mu.Unlock() - _, err := pbs.copyContent(ctx, dgst, w) - return err - } - inflight[dgst] = struct{}{} - mu.Unlock() - - go func(dgst digest.Digest) { - if err := pbs.storeLocal(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) - } - pbs.scheduler.AddBlob(dgst.String(), repositoryTTL) - }(dgst) - - _, err = pbs.copyContent(ctx, dgst, w) - if err != nil { - return err - } - return nil -} - -func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := pbs.localStore.Stat(ctx, dgst) - if err == nil { - return desc, err - } - - if err != distribution.ErrBlobUnknown { - return distribution.Descriptor{}, err - } - - return pbs.remoteStore.Stat(ctx, dgst) -} - -// Unsupported functions -func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - return distribution.Descriptor{}, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { - return distribution.Descriptor{}, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - return nil, 
distribution.ErrUnsupported -} - -func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go deleted file mode 100644 index 13cb5f6b..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go +++ /dev/null @@ -1,86 +0,0 @@ -package proxy - -import ( - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/proxy/scheduler" -) - -// todo(richardscothern): from cache control header or config -const repositoryTTL = time.Duration(24 * 7 * time.Hour) - -type proxyManifestStore struct { - ctx context.Context - localManifests distribution.ManifestService - remoteManifests distribution.ManifestService - repositoryName string - scheduler *scheduler.TTLExpirationScheduler -} - -var _ distribution.ManifestService = &proxyManifestStore{} - -func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - exists, err := pms.localManifests.Exists(ctx, dgst) - if err != nil { - return false, err - } - if exists { - return true, nil - } - - return pms.remoteManifests.Exists(ctx, dgst) -} - -func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - // At this point `dgst` was either specified explicitly, or returned by the - // tagstore with the most recent association. - var fromRemote bool - manifest, err := pms.localManifests.Get(ctx, dgst, options...) - if err != nil { - manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
- if err != nil { - return nil, err - } - fromRemote = true - } - - _, payload, err := manifest.Payload() - if err != nil { - return nil, err - } - - proxyMetrics.ManifestPush(uint64(len(payload))) - if fromRemote { - proxyMetrics.ManifestPull(uint64(len(payload))) - - _, err = pms.localManifests.Put(ctx, manifest) - if err != nil { - return nil, err - } - - // Schedule the repo for removal - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) - - // Ensure the manifest blob is cleaned up - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - } - - return manifest, err -} - -func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - var d digest.Digest - return d, distribution.ErrUnsupported -} - -func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} - -/*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - return 0, distribution.ErrUnsupported -} -*/ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go deleted file mode 100644 index d3d84d78..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go +++ /dev/null @@ -1,74 +0,0 @@ -package proxy - -import ( - "expvar" - "sync/atomic" -) - -// Metrics is used to hold metric counters -// related to the proxy -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 - BytesPulled uint64 - BytesPushed uint64 -} - -type proxyMetricsCollector struct { - blobMetrics Metrics - manifestMetrics Metrics -} - -// BlobPull tracks metrics about blobs pulled into the cache -func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.blobMetrics.Misses, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) -} - -// BlobPush tracks metrics about blobs pushed to clients -func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.blobMetrics.Requests, 1) - atomic.AddUint64(&pmc.blobMetrics.Hits, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) -} - -// ManifestPull tracks metrics related to Manifests pulled into the cache -func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) -} - -// ManifestPush tracks metrics about manifests pushed to clients -func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) - atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) -} - -// proxyMetrics tracks metrics about the proxy cache. This is -// kept globally and made available via expvar. 
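The `inflight` map in ServeBlob above is a hand-rolled de-duplication of concurrent pulls: the first request for a digest kicks off the background download, and later requests for the same digest stream the content through without starting a second pull. The same pattern, distilled into a self-contained sketch (names are illustrative; the packaged equivalent of this idea is golang.org/x/sync/singleflight):

```go
package proxyutil

import "sync"

var (
	inflightMu  sync.Mutex
	inflightOps = make(map[string]struct{})
)

// startOnce begins fn in the background only if no identical operation
// is already running for key; it reports whether this call started it.
func startOnce(key string, fn func()) bool {
	inflightMu.Lock()
	if _, busy := inflightOps[key]; busy {
		inflightMu.Unlock()
		return false
	}
	inflightOps[key] = struct{}{}
	inflightMu.Unlock()

	go func() {
		// Clear the marker when done, as storeLocal's deferred
		// delete does above.
		defer func() {
			inflightMu.Lock()
			delete(inflightOps, key)
			inflightMu.Unlock()
		}()
		fn()
	}()
	return true
}
```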
-var proxyMetrics = &proxyMetricsCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - pm := registry.(*expvar.Map).Get("proxy") - if pm == nil { - pm = &expvar.Map{} - pm.(*expvar.Map).Init() - registry.(*expvar.Map).Set("proxy", pm) - } - - pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { - return proxyMetrics.blobMetrics - })) - - pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { - return proxyMetrics.manifestMetrics - })) - -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go deleted file mode 100644 index 8e1be5f2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go +++ /dev/null @@ -1,142 +0,0 @@ -package proxy - -import ( - "net/http" - "net/url" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/driver" -) - -// proxyingRegistry fetches content from a remote registry and caches it locally -type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - - scheduler *scheduler.TTLExpirationScheduler - - remoteURL string - credentialStore auth.CredentialStore - challengeManager auth.ChallengeManager -} - -// NewRegistryPullThroughCache creates a registry acting as a pull through cache -func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { - _, err := url.Parse(config.RemoteURL) - if err != nil { - return nil, err - } - - v := storage.NewVacuum(ctx, driver) - - s := scheduler.New(ctx, driver, "/scheduler-state.json") - s.OnBlobExpire(func(digest string) error { - return v.RemoveBlob(digest) - }) - s.OnManifestExpire(func(repoName string) error { - return v.RemoveRepository(repoName) - }) - - err = s.Start() - if err != nil { - return nil, err - } - - challengeManager := auth.NewSimpleChallengeManager() - cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) - if err != nil { - return nil, err - } - - return &proxyingRegistry{ - embedded: registry, - scheduler: s, - challengeManager: challengeManager, - credentialStore: cs, - remoteURL: config.RemoteURL, - }, nil -} - -func (pr *proxyingRegistry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { - return pr.embedded.Repositories(ctx, repos, last) -} - -func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) - - localRepo, err := pr.embedded.Repository(ctx, name) - if err != nil { - return nil, err - } - localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) - if 
err != nil { - return nil, err - } - - remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) - if err != nil { - return nil, err - } - - remoteManifests, err := remoteRepo.Manifests(ctx) - if err != nil { - return nil, err - } - - return &proxiedRepository{ - blobStore: &proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, - }, - manifests: proxyManifestStore{ - repositoryName: name, - localManifests: localManifests, // Options? - remoteManifests: remoteManifests, - ctx: ctx, - scheduler: pr.scheduler, - }, - name: name, - tags: proxyTagService{ - localTags: localRepo.Tags(ctx), - remoteTags: remoteRepo.Tags(ctx), - }, - }, nil -} - -// proxiedRepository uses proxying blob and manifest services to serve content -// locally, or pulling it through from a remote and caching it locally if it doesn't -// already exist -type proxiedRepository struct { - blobStore distribution.BlobStore - manifests distribution.ManifestService - name string - tags distribution.TagService -} - -func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - return pr.manifests, nil -} - -func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { - return pr.blobStore -} - -func (pr *proxiedRepository) Name() string { - return pr.name -} - -func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { - return pr.tags -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go deleted file mode 100644 index c52460c4..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go +++ /dev/null @@ -1,58 +0,0 @@ -package proxy - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// proxyTagService supports local and remote lookup of tags. -type proxyTagService struct { - localTags distribution.TagService - remoteTags distribution.TagService -} - -var _ distribution.TagService = proxyTagService{} - -// Get attempts to get the most recent digest for the tag by checking the remote -// tag service first and then caching it locally. 
If the remote is unavailable -// the local association is returned -func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - desc, err := pt.remoteTags.Get(ctx, tag) - if err == nil { - err := pt.localTags.Tag(ctx, tag, desc) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - desc, err = pt.localTags.Get(ctx, tag) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil -} - -func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} - -func (pt proxyTagService) Untag(ctx context.Context, tag string) error { - err := pt.localTags.Untag(ctx, tag) - if err != nil { - return err - } - return nil -} - -func (pt proxyTagService) All(ctx context.Context) ([]string, error) { - tags, err := pt.remoteTags.All(ctx) - if err == nil { - return tags, err - } - return pt.localTags.All(ctx) -} - -func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - return []string{}, distribution.ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go deleted file mode 100644 index e91920a1..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go +++ /dev/null @@ -1,252 +0,0 @@ -package scheduler - -import ( - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// onTTLExpiryFunc is called when a repository's TTL expires -type expiryFunc func(string) error - -const ( - entryTypeBlob = iota - entryTypeManifest - indexSaveFrequency = 5 * time.Second -) - -// schedulerEntry represents an entry in the scheduler -// fields are exported for serialization -type schedulerEntry struct { - Key string `json:"Key"` - Expiry time.Time `json:"ExpiryData"` - EntryType int `json:"EntryType"` - - timer *time.Timer -} - -// New returns a new instance of the scheduler -func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { - return &TTLExpirationScheduler{ - entries: make(map[string]*schedulerEntry), - driver: driver, - pathToStateFile: path, - ctx: ctx, - stopped: true, - doneChan: make(chan struct{}), - saveTimer: time.NewTicker(indexSaveFrequency), - } -} - -// TTLExpirationScheduler is a scheduler used to perform actions -// when TTLs expire -type TTLExpirationScheduler struct { - sync.Mutex - - entries map[string]*schedulerEntry - - driver driver.StorageDriver - ctx context.Context - pathToStateFile string - - stopped bool - - onBlobExpire expiryFunc - onManifestExpire expiryFunc - - indexDirty bool - saveTimer *time.Ticker - doneChan chan struct{} -} - -// OnBlobExpire is called when a scheduled blob's TTL expires -func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { - ttles.Lock() - defer ttles.Unlock() - - ttles.onBlobExpire = f -} - -// OnManifestExpire is called when a scheduled manifest's TTL expires -func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { - ttles.Lock() - defer ttles.Unlock() - - ttles.onManifestExpire = f -} - -// AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { - ttles.Lock() - defer ttles.Unlock() - - if 
ttles.stopped { - return fmt.Errorf("scheduler not started") - } - ttles.add(dgst, ttl, entryTypeBlob) - return nil -} - -// AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { - ttles.Lock() - defer ttles.Unlock() - - if ttles.stopped { - return fmt.Errorf("scheduler not started") - } - - ttles.add(repoName, ttl, entryTypeManifest) - return nil -} - -// Start starts the scheduler -func (ttles *TTLExpirationScheduler) Start() error { - ttles.Lock() - defer ttles.Unlock() - - err := ttles.readState() - if err != nil { - return err - } - - if !ttles.stopped { - return fmt.Errorf("Scheduler already started") - } - - context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") - ttles.stopped = false - - // Start timer for each deserialized entry - for _, entry := range ttles.entries { - entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) - } - - // Start a ticker to periodically save the entries index - - go func() { - for { - select { - case <-ttles.saveTimer.C: - if !ttles.indexDirty { - continue - } - - ttles.Lock() - err := ttles.writeState() - if err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } else { - ttles.indexDirty = false - } - ttles.Unlock() - - case <-ttles.doneChan: - return - } - } - }() - - return nil -} - -func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { - entry := &schedulerEntry{ - Key: key, - Expiry: time.Now().Add(ttl), - EntryType: eType, - } - context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - if oldEntry, present := ttles.entries[key]; present && oldEntry.timer != nil { - oldEntry.timer.Stop() - } - ttles.entries[key] = entry - entry.timer = ttles.startTimer(entry, ttl) - ttles.indexDirty = true -} - -func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { - return time.AfterFunc(ttl, func() { - ttles.Lock() - defer ttles.Unlock() - - var f expiryFunc - - switch entry.EntryType { - case entryTypeBlob: - f = ttles.onBlobExpire - case entryTypeManifest: - f = ttles.onManifestExpire - default: - f = func(repoName string) error { - return fmt.Errorf("Unexpected scheduler entry type") - } - } - - if err := f(entry.Key); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) - } - - delete(ttles.entries, entry.Key) - ttles.indexDirty = true - }) -} - -// Stop stops the scheduler. 
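Before `Stop` below, the wiring sequence the proxy registry uses for this scheduler: construct, attach expiry callbacks, `Start`, then schedule entries. A hedged sketch; the in-memory storage driver is an assumption used only to keep the example self-contained, and the digest is a placeholder:

```go
package example

import (
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/proxy/scheduler"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func exampleScheduler(ctx context.Context) error {
	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
	s.OnBlobExpire(func(dgst string) error {
		// Remove the expired blob from local storage here.
		return nil
	})
	// Start restores any persisted entries and re-arms their timers.
	if err := s.Start(); err != nil {
		return err
	}
	// AddBlob fails with "scheduler not started" if called before Start.
	return s.AddBlob("sha256:0000000000000000000000000000000000000000000000000000000000000000", 7*24*time.Hour)
}
```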
-func (ttles *TTLExpirationScheduler) Stop() { - ttles.Lock() - defer ttles.Unlock() - - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - - for _, entry := range ttles.entries { - entry.timer.Stop() - } - - close(ttles.doneChan) - ttles.saveTimer.Stop() - ttles.stopped = true -} - -func (ttles *TTLExpirationScheduler) writeState() error { - jsonBytes, err := json.Marshal(ttles.entries) - if err != nil { - return err - } - - err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) - if err != nil { - return err - } - - return nil -} - -func (ttles *TTLExpirationScheduler) readState() error { - if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil - default: - return err - } - } - - bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) - if err != nil { - return err - } - - err = json.Unmarshal(bytes, &ttles.entries) - if err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go deleted file mode 100644 index 86cb6a17..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go +++ /dev/null @@ -1,337 +0,0 @@ -package registry - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/formatters/logstash" - "github.com/bugsnag/bugsnag-go" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/registry/handlers" - "github.com/docker/distribution/registry/listener" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution/version" - gorhandlers "github.com/gorilla/handlers" - "github.com/spf13/cobra" - "github.com/yvasiyarov/gorelic" -) - -// Cmd is a cobra command for running the registry. -var Cmd = &cobra.Command{ - Use: "registry ", - Short: "registry stores and distributes Docker images", - Long: "registry stores and distributes Docker images.", - Run: func(cmd *cobra.Command, args []string) { - if showVersion { - version.PrintVersion() - return - } - - // setup context - ctx := context.WithVersion(context.Background(), version.Version) - - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - if config.HTTP.Debug.Addr != "" { - go func(addr string) { - log.Infof("debug server listening %v", addr) - if err := http.ListenAndServe(addr, nil); err != nil { - log.Fatalf("error listening on debug interface: %v", err) - } - }(config.HTTP.Debug.Addr) - } - - registry, err := NewRegistry(ctx, config) - if err != nil { - log.Fatalln(err) - } - - if err = registry.ListenAndServe(); err != nil { - log.Fatalln(err) - } - }, -} - -var showVersion bool - -func init() { - Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") -} - -// A Registry represents a complete instance of the registry. -// TODO(aaronl): It might make sense for Registry to become an interface. 
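The cobra command above is a thin wrapper around the `Registry` type that follows; embedding the registry in another program repeats the same steps. A hedged sketch using only functions visible in this file (config path handling is simplified for illustration):

```go
package example

import (
	"os"

	"github.com/docker/distribution/configuration"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry"
	"github.com/docker/distribution/version"
)

// runEmbedded mirrors the steps of the Run function above: parse the
// configuration, build a versioned context, construct, and serve.
func runEmbedded(configPath string) error {
	fp, err := os.Open(configPath)
	if err != nil {
		return err
	}
	defer fp.Close()

	config, err := configuration.Parse(fp)
	if err != nil {
		return err
	}

	ctx := context.WithVersion(context.Background(), version.Version)
	reg, err := registry.NewRegistry(ctx, config)
	if err != nil {
		return err
	}
	return reg.ListenAndServe()
}
```

Note the caveat in the code below: because health checks are registered globally, `NewRegistry` can only be called once per process.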
-type Registry struct { - config *configuration.Configuration - app *handlers.App - server *http.Server -} - -// NewRegistry creates a new registry from a context and configuration struct. -func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { - var err error - ctx, err = configureLogging(ctx, config) - if err != nil { - return nil, fmt.Errorf("error configuring logger: %v", err) - } - - // inject a logger into the uuid library. warns us if there is a problem - // with uuid generation under low entropy. - uuid.Loggerf = context.GetLogger(ctx).Warnf - - app := handlers.NewApp(ctx, config) - // TODO(aaronl): The global scope of the health checks means NewRegistry - // can only be called once per process. - app.RegisterHealthChecks() - handler := configureReporting(app) - handler = alive("/", handler) - handler = health.Handler(handler) - handler = panicHandler(handler) - handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - - server := &http.Server{ - Handler: handler, - } - - return &Registry{ - app: app, - config: config, - server: server, - }, nil -} - -// ListenAndServe runs the registry's HTTP server. -func (registry *Registry) ListenAndServe() error { - config := registry.config - - ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) - if err != nil { - return err - } - - if config.HTTP.TLS.Certificate != "" { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - NextProtos: []string{"http/1.1"}, - Certificates: make([]tls.Certificate, 1), - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - } - - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - return err - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) - } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) - } - - return registry.server.Serve(ln) -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if 
app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. -func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fall back to the deprecated "Loglevel". - log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // just let the library use the default on empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q", level, err, l) - } - - return l -} - -// panicHandler adds an HTTP handler to the web app. The handler recovers any -// panic that occurs. logrus.Panic transmits the panic message to the -// pre-configured log hooks defined in config.yml. -func panicHandler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if err := recover(); err != nil { - log.Panic(fmt.Sprintf("%v", err)) - } - }() - handler.ServeHTTP(w, r) - }) -} - -// alive simply wraps the handler with a route that always returns an http 200 -// response when the path is matched. If the path is not matched, the request -// is passed to the provided handler. There is no guarantee of anything but -// that the server is up. Wrap with other handlers (such as health.Handler) -// for greater effect.
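NewRegistry above builds its HTTP pipeline by repeatedly wrapping one handler in another: alive, health, panic recovery, then logging. A minimal sketch of that wrapping pattern, using an illustrative logging wrapper rather than the registry's own handlers:

```go
package main

import (
	"log"
	"net/http"
)

// withLogging is an illustrative wrapper: the wrapper applied last runs
// first, which is why CombinedLoggingHandler is the outermost layer above.
func withLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s", r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	h = withLogging(h) // outermost wrapper sees the request first
	log.Fatal(http.ListenAndServe(":8080", h))
}
```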
-func alive(path string, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == path { - w.Header().Set("Cache-Control", "no-cache") - w.WriteHeader(http.StatusOK) - return - } - - handler.ServeHTTP(w, r) - }) -} - -func resolveConfiguration(args []string) (*configuration.Configuration, error) { - var configurationPath string - - if len(args) > 0 { - configurationPath = args[0] - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - defer fp.Close() - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go deleted file mode 100644 index fad0a77a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go +++ /dev/null @@ -1,60 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - - "github.com/docker/distribution/registry/storage/cache" -) - -type blobStatCollector struct { - metrics cache.Metrics -} - -func (bsc *blobStatCollector) Hit() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Hits, 1) -} - -func (bsc *blobStatCollector) Miss() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Misses, 1) -} - -func (bsc *blobStatCollector) Metrics() cache.Metrics { - return bsc.metrics -} - -// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor -// cache requests. Note this is kept globally and made available via expvar. -// For more detailed metrics, it's recommended to instrument a particular cache -// implementation. -var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly.
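The init above publishes the tracker through nested expvar maps so it appears under registry.cache.storage.blobdescriptor in the JSON served at /debug/vars. A small sketch of the same nesting, with a hypothetical leaf metric name:

```go
package main

import (
	"expvar"
	"net/http"
)

func main() {
	// Same shape as the init above: registry -> cache -> storage.
	registry := expvar.NewMap("registry")
	cacheMap := new(expvar.Map).Init()
	registry.Set("cache", cacheMap)

	storage := new(expvar.Map).Init()
	cacheMap.Set("storage", storage)

	hits := new(expvar.Int) // hypothetical leaf metric
	storage.Set("hits", hits)
	hits.Add(1)

	// expvar registers /debug/vars on the default mux as a side effect
	// of being imported, so any default-mux server exposes the values.
	http.ListenAndServe(":8080", nil)
}
```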
- return blobStatterCacheMetrics - })) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go deleted file mode 100644 index 2655e011..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go +++ /dev/null @@ -1,78 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): This should be configurable in the future. -const blobCacheControlMaxAge = 365 * 24 * time.Hour - -// blobServer simply serves blobs from a driver instance using a path function -// to identify paths and a descriptor service to fill in metadata. -type blobServer struct { - driver driver.StorageDriver - statter distribution.BlobStatter - pathFn func(dgst digest.Digest) (string, error) - redirect bool // allows disabling URLFor redirects -} - -func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return err - } - - path, err := bs.pathFn(desc.Digest) - if err != nil { - return err - } - - if bs.redirect { - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - switch err.(type) { - case nil: - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - return err - - case driver.ErrUnsupportedMethod: - // Fall back to serving the content directly. - default: - // Some unexpected error. - return err - } - } - - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { - return err - } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go deleted file mode 100644 index f8fe23fe..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go +++ /dev/null @@ -1,192 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// blobStore implements the read side of the blob store interface over a -// driver without enforcing per-repository membership. This object is -// intentionally a leaky abstraction, providing utility methods that support -// creating and traversing backend links.
-type blobStore struct { - driver driver.StorageDriver - statter distribution.BlobStatter -} - -var _ distribution.BlobProvider = &blobStore{} - -// Get implements the BlobReadService.Get call. -func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - p, err := bs.driver.GetContent(ctx, bp) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUnknown - } - - return nil, err - } - - return p, err -} - -func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return nil, err - } - - path, err := bs.path(desc.Digest) - if err != nil { - return nil, err - } - - return newFileReader(ctx, bs.driver, path, desc.Size) -} - -// Put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. This is implemented as a -// convenience for other Put implementations. -func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - desc, err := bs.statter.Stat(ctx, dgst) - if err == nil { - // content already present - return desc, nil - } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) - // real error, return it - return distribution.Descriptor{}, err - } - - bp, err := bs.path(dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype here, as well. - - return distribution.Descriptor{ - Size: int64(len(p)), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, bs.driver.PutContent(ctx, bp, p) -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// link links the path to the provided digest by writing the digest into the -// target file. Caller must ensure that the blob actually exists. -func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(ctx, path, []byte(dgst)) -} - -// readlink returns the linked digest at path. -func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(ctx, path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store path.
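As link and readlink above show, a link file's entire contents are the digest string itself. A toy round-trip of that idea, using a plain map and an illustrative path in place of the storage driver:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	payload := []byte("layer bytes")
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))

	// "PutContent" on a link path: the file body is exactly the digest string.
	store := map[string][]byte{} // in-memory stand-in for the storage driver
	store["repositories/foo/_layers/link"] = []byte(dgst) // hypothetical path

	// "readlink": read the file back and treat its contents as the digest.
	linked := string(store["repositories/foo/_layers/link"])
	fmt.Println(linked == dgst) // true
}
```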
-func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { - dgst, err := bs.readlink(ctx, path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -type blobStatter struct { - driver driver.StorageDriver -} - -var _ distribution.BlobDescriptorService = &blobStatter{} - -// Stat implements BlobStatter.Stat by returning the descriptor for the blob -// in the main blob store. If this method returns successfully, there is a -// strong guarantee that the blob exists and is available. -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - fi, err := bs.driver.Stat(ctx, path) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrBlobUnknown - default: - return distribution.Descriptor{}, err - } - } - - if fi.IsDir() { - // NOTE(stevvooe): This represents a corruption situation. Somehow, we - // calculated a blob path and then detected a directory. We log the - // error and then err on the side of not knowing about the blob. - context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - // TODO(stevvooe): Add method to resolve the mediatype. We can store and - // cache a "global" media type for the blob, even if a specific repo has a - // mediatype that overrides the main one. - - return distribution.Descriptor{ - Size: fi.Size(), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, nil -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go deleted file mode 100644 index 37903176..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go +++ /dev/null @@ -1,380 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "io" - "path" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var ( - errResumableDigestNotAvailable = errors.New("resumable digest not available") -) - -// blobWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. -type blobWriter struct { - blobStore *linkedBlobStore - - id string - startedAt time.Time - digester digest.Digester - written int64 // track the contiguous write - - // implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy the - // LayerUpload interface - bufferedFileWriter - - resumableDigestEnabled bool -} - -var _ distribution.BlobWriter = &blobWriter{} - -// ID returns the identifier for this upload.
-func (bw *blobWriter) ID() string { - return bw.id -} - -func (bw *blobWriter) StartedAt() time.Time { - return bw.startedAt -} - -// Commit marks the upload as completed, returning a valid descriptor. The -// final size and digest are checked against the first descriptor provided. -func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - context.GetLogger(ctx).Debug("(*blobWriter).Commit") - - if err := bw.bufferedFileWriter.Close(); err != nil { - return distribution.Descriptor{}, err - } - - canonical, err := bw.validateBlob(ctx, desc) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.moveBlob(ctx, canonical); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.removeResources(ctx); err != nil { - return distribution.Descriptor{}, err - } - - err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) - if err != nil { - return distribution.Descriptor{}, err - } - - return canonical, nil -} - -// Rollback the blob upload process, releasing any resources associated with -// the writer and canceling the operation. -func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Rollback") - if err := bw.removeResources(ctx); err != nil { - return err - } - - bw.Close() - return nil -} - -func (bw *blobWriter) Write(p []byte) (int, error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) - bw.written += int64(n) - - return n, err -} - -func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) - bw.written += nn - - return nn, err -} - -func (bw *blobWriter) Close() error { - if bw.err != nil { - return bw.err - } - - if err := bw.storeHashState(bw.blobStore.ctx); err != nil { - return err - } - - return bw.bufferedFileWriter.Close() -} - -// validateBlob checks the data against the digest, returning an error if it -// does not match. The canonical descriptor is returned. -func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if desc.Digest == "" { - // if no descriptors are provided, we have nothing to validate - // against. We don't really want to support this for the registry. 
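The Write and ReadFrom methods above tee every byte into the digester while it is also written to backing storage, so the hash is computed without a second pass. A minimal sketch of both patterns with crypto/sha256, using a bytes.Buffer as a stand-in for the buffered file writer:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	data := []byte("layer data")
	var file bytes.Buffer // stands in for the bufferedFileWriter

	// Write path: io.MultiWriter sends each chunk to file and hash together.
	h := sha256.New()
	if _, err := io.Copy(io.MultiWriter(&file, h), bytes.NewReader(data)); err != nil {
		panic(err)
	}

	// ReadFrom path: io.TeeReader hashes bytes as the writer consumes them.
	file.Reset()
	h2 := sha256.New()
	if _, err := file.ReadFrom(io.TeeReader(bytes.NewReader(data), h2)); err != nil {
		panic(err)
	}

	fmt.Printf("%x\n%x\n", h.Sum(nil), h2.Sum(nil)) // identical digests
}
```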
- return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Reason: fmt.Errorf("cannot validate against empty digest"), - } - } - - // Stat the on disk file - if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is - // not actually present for the reader. We now assume - // that the desc length is zero. - desc.Size = 0 - default: - // Any other error we want propagated up the stack. - return distribution.Descriptor{}, err - } - } else { - if fi.IsDir() { - return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) - } - - bw.size = fi.Size() - } - - if desc.Size > 0 { - if desc.Size != bw.size { - return distribution.Descriptor{}, distribution.ErrBlobInvalidLength - } - } else { - // if provided 0 or negative length, we can assume caller doesn't know or - // care about length. - desc.Size = bw.size - } - - // TODO(stevvooe): This section is very meandering. Need to be broken down - // to be a lot more clear. - - if err := bw.resumeDigestAt(ctx, bw.size); err == nil { - canonical = bw.digester.Digest() - - if canonical.Algorithm() == desc.Digest.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = desc.Digest == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else if err == errResumableDigestNotAvailable { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } else { - return distribution.Descriptor{}, err - } - - if fullHash { - // a fantastic optimization: if the written data and the size are - // the same, we don't need to read the data from the backend. This is - // because we've written the entire file in the lifecycle of the - // current instance. - if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { - canonical = bw.digester.Digest() - verified = desc.Digest == canonical - } - - // If the check based on size fails, we fall back to the slowest of - // paths. We may be able to make the size-based check a stronger - // guarantee, so this may be defensive. - if !verified { - digester := digest.Canonical.New() - - digestVerifier, err := digest.NewDigestVerifier(desc.Digest) - if err != nil { - return distribution.Descriptor{}, err - } - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) - if err != nil { - return distribution.Descriptor{}, err - } - defer fr.Close() - - tr := io.TeeReader(fr, digester.Hash()) - - if _, err := io.Copy(digestVerifier, tr); err != nil { - return distribution.Descriptor{}, err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - } - - if !verified { - context.GetLoggerWithFields(ctx, - map[interface{}]interface{}{ - "canonical": canonical, - "provided": desc.Digest, - }, "canonical", "provided").
- Errorf("canonical digest does match provided digest") - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Digest: desc.Digest, - Reason: fmt.Errorf("content does not match digest"), - } - } - - // update desc with canonical hash - desc.Digest = canonical - - if desc.MediaType == "" { - desc.MediaType = "application/octet-stream" - } - - return desc, nil -} - -// moveBlob moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := pathFor(blobDataPathSpec{ - digest: desc.Digest, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // tars. - if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the digest of an empty tar. - if desc.Digest == digest.DigestSha256EmptyTar { - return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.id", bw.ID()). - WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - // TODO(stevvooe): We should also write the mediatype when executing this move. - - return bw.blobStore.driver.Move(ctx, bw.path, blobPath) -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned. -func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name(), - id: bw.id, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. - dirPath := path.Dir(dataPath) - if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became unaccessible. 
- context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} - -func (bw *blobWriter) Reader() (io.ReadCloser, error) { - // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 - try := 1 - for try <= 5 { - _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) - if err == nil { - break - } - switch err.(type) { - case storagedriver.PathNotFoundError: - context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) - time.Sleep(1 * time.Second) - try++ - default: - return nil, err - } - } - - readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) - if err != nil { - return nil, err - } - - return readCloser, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go deleted file mode 100644 index 39166876..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build noresumabledigest - -package storage - -import ( - "github.com/docker/distribution/context" -) - -// resumeHashAt is a noop when resumable digest support is disabled. -func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { - return errResumableDigestNotAvailable -} - -// storeHashState is a noop when resumable digest support is disabled. -func (bw *blobWriter) storeHashState(ctx context.Context) error { - return errResumableDigestNotAvailable -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go deleted file mode 100644 index d33f544d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ /dev/null @@ -1,178 +0,0 @@ -// +build !noresumabledigest - -package storage - -import ( - "fmt" - "io" - "os" - "path" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/stevvooe/resumable" - - // register resumable hashes with import - _ "github.com/stevvooe/resumable/sha256" - _ "github.com/stevvooe/resumable/sha512" -) - -// resumeDigestAt attempts to restore the state of the internal hash function -// by loading the most recent saved hash state less than or equal to the given -// offset. Any unhashed bytes remaining less than the given offset are hashed -// from the content uploaded so far. -func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - if offset == int64(h.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := bw.getStoredHashStates(ctx) - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset less than or equal to - // the requested offset. 
- for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. - hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // error here as a fatal error, but only log it. - if err := bw.driver.Delete(ctx, hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - h.Reset() - } else { - storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = h.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(h.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired offset. - fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) - if err != nil { - return err - } - defer fr.Close() - - if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) - } - - if _, err := io.CopyN(h, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - list: true, - }) - - if err != nil { - return nil, err - } - - paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. - paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset.
- offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -func (bw *blobWriter) storeHashState(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - offset: int64(h.Len()), - }) - - if err != nil { - return err - } - - hashState, err := h.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go deleted file mode 100644 index 42390953..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go +++ /dev/null @@ -1,179 +0,0 @@ -package cachecheck - -import ( - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" -) - -// CheckBlobDescriptorCache takes a cache implementation through a common set -// of operations. If adding new tests, please add them here so new -// implementations get the benefit. This should be used for unit tests. -func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { - ctx := context.Background() - - checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) - checkBlobDescriptorCacheSetAndRead(t, ctx, provider) -} - -func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - t.Fatalf("expected unknown blob error with empty store: %v", err) - } - - cache, err := provider.RepositoryScoped("") - if err == nil { - t.Fatalf("expected an error when asking for invalid repo") - } - - cache, err = provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting repository: %v", err) - } - - if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ - Digest: "sha384:abc", - Size: 10, - MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error with invalid digest: %v", err) - } - - if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ - Digest: "", - Size: 10, - MediaType: "application/octet-stream"}); err == nil { - t.Fatalf("expected error setting value on invalid descriptor") - } - - if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error checking for cache item with empty digest: %v", err) - } - - if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - 
t.Fatalf("expected unknown blob error with empty repo: %v", err) - } -} - -func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") - expected := distribution.Descriptor{ - Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // also check that we set the canonical key ("fake:abc") - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("descriptor not returned for canonical key: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // ensure that global gets extra descriptor mapping - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) - } - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // get at it through canonical descriptor - desc, err = provider.Stat(ctx, expected.Digest) - if err != nil { - t.Fatalf("unexpected error checking glboal descriptor: %v", err) - } - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // now, we set the repo local mediatype to something else and ensure it - // doesn't get changed in the provider cache. 
- expected.MediaType = "application/json" - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("unexpected error setting descriptor: %v", err) - } - - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting descriptor: %v", err) - } - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } - - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting global descriptor: %v", err) - } - - expected.MediaType = "application/octet-stream" // expect original mediatype in global - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } -} - -func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc") - expected := distribution.Descriptor{ - Digest: "sha256:abc", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - err = cache.Clear(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error deleting descriptor") - } - - nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - err = cache.Clear(ctx, nonExistantDigest) - if err == nil { - t.Fatalf("expected error deleting unknown descriptor") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go deleted file mode 100644 index cb264b09..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go +++ /dev/null @@ -1,268 +0,0 @@ -package redis - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/garyburd/redigo/redis" -) - -// redisBlobStatService provides an implementation of -// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in -// two parts. The first provide fast access to repository membership through a -// redis set for each repo. The second is a redis hash keyed by the digest of -// the layer, providing path, length and mediatype information. There is also -// a per-repository redis hash of the blob descriptor, allowing override of -// data. This is currently used to override the mediatype on a per-repository -// basis. -// -// Note that there is no implied relationship between these two caches. The -// layer may exist in one, both or none and the code must be written this way. -type redisBlobDescriptorService struct { - pool *redis.Pool - - // TODO(stevvooe): We use a pool because we don't have great control over - // the cache lifecycle to manage connections. A new connection if fetched - // for each operation. 
Once we have better lifecycle management of the - // request objects, we can change this to a connection. -} - -// NewRedisBlobDescriptorCacheProvider returns a new redis-based -// BlobDescriptorCacheProvider using the provided redis connection pool. -func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { - return &redisBlobDescriptorService{ - pool: pool, - } -} - -// RepositoryScoped returns the scoped cache. -func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - return &repositoryScopedRedisBlobDescriptorService{ - repo: repo, - upstream: rbds, - }, nil -} - -// Stat retrieves the descriptor data from the redis hash entry. -func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - conn := rbds.pool.Get() - defer conn.Close() - - return rbds.stat(ctx, conn, dgst) -} - -func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { - if err := dgst.Validate(); err != nil { - return err - } - - conn := rbds.pool.Get() - defer conn.Close() - - // Not atomic in redis <= 2.3 - reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") - if err != nil { - return err - } - - if reply == 0 { - return distribution.ErrBlobUnknown - } - - return nil -} - -// stat provides an internal stat call that takes a connection parameter. This -// allows some internal management of the connection scope. -func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { - reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) - if err != nil { - return distribution.Descriptor{}, err - } - - // NOTE(stevvooe): The "size" field used to be "length". We treat a - // missing "size" field here as an unknown blob, which causes a cache - // miss, effectively migrating the field. - if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - var desc distribution.Descriptor - if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { - return distribution.Descriptor{}, err - } - - return desc, nil -} - -// SetDescriptor sets the descriptor data for the given digest using a redis -// hash. A hash is used here since we may store unrelated fields about a layer -// in the future. -func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - conn := rbds.pool.Get() - defer conn.Close() - - return rbds.setDescriptor(ctx, conn, dgst, desc) -} - -func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), - "digest", desc.Digest, - "size", desc.Size); err != nil { - return err - } - - // Only set mediatype if not already set. 
- if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), - "mediatype", desc.MediaType); err != nil { - return err - } - - return nil -} - -func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { - return "blobs::" + dgst.String() -} - -type repositoryScopedRedisBlobDescriptorService struct { - repo string - upstream *redisBlobDescriptorService -} - -var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} - -// Stat ensures that the digest is a member of the specified repository and -// forwards the descriptor request to the global blob store. If the media type -// differs for the repository, we override it. -func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) - if err != nil { - return distribution.Descriptor{}, err - } - - if !member { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // We allow a per repository mediatype, let's look it up here. - mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) - if err != nil { - return distribution.Descriptor{}, err - } - - if mediatype != "" { - upstream.MediaType = mediatype - } - - return upstream, nil -} - -// Clear removes the descriptor from the cache and forwards to the upstream descriptor store -func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { - if err := dgst.Validate(); err != nil { - return err - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) - if err != nil { - return err - } - - if !member { - return distribution.ErrBlobUnknown - } - - return rsrbds.upstream.Clear(ctx, dgst) -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - if dgst != desc.Digest { - if dgst.Algorithm() == desc.Digest.Algorithm() { - return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest) - } - } - - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - return rsrbds.setDescriptor(ctx, conn, dgst, desc) -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { - return err - } - - if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { - return err - } - - // Override repository mediatype. 
- if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { - return err - } - - // Also set the values for the primary descriptor, if they differ by - // algorithm (ie sha256 vs sha512). - if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { - if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { - return err - } - } - - return nil -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { - return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() -} - -func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { - return "repository::" + rsrbds.repo + "::blobs" -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go deleted file mode 100644 index 481489f2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go +++ /dev/null @@ -1,66 +0,0 @@ -package storage - -import ( - "errors" - "io" - "path" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// ErrFinishedWalk is used when the called walk function no longer wants -// to accept any more values. This is used for pagination when the -// required number of repos have been found. -var ErrFinishedWalk = errors.New("finished walk") - -// Returns a list, or partial list, of repositories in the registry. -// Because it's a quite expensive operation, it should only be used when building up -// an initial set of repositories. -func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { - var foundRepos []string - - if len(repos) == 0 { - return 0, errors.New("no space in slice") - } - - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return 0, err - } - - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - - // lop the base path off - repoPath := filePath[len(root)+1:] - - _, file := path.Split(repoPath) - if file == "_layers" { - repoPath = strings.TrimSuffix(repoPath, "/_layers") - if repoPath > last { - foundRepos = append(foundRepos, repoPath) - } - return ErrSkipDir - } else if strings.HasPrefix(file, "_") { - return ErrSkipDir - } - - // if we've filled our array, no need to walk any further - if len(foundRepos) == len(repos) { - return ErrFinishedWalk - } - - return nil - }) - - n = copy(repos, foundRepos) - - // Signal that we have no more entries by setting EOF - if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { - errVal = io.EOF - } - - return n, errVal -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go deleted file mode 100644 index 387d9234..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package storage contains storage services for use in the registry -// application. It should be considered an internal package, as of Go 1.4. 
-package storage diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go deleted file mode 100644 index cbb95981..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ /dev/null @@ -1,366 +0,0 @@ -// Package azure provides a storagedriver.StorageDriver implementation to -// store blobs in Microsoft Azure Blob Storage Service. -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -const driverName = "azure" - -const ( - paramAccountName = "accountname" - paramAccountKey = "accountkey" - paramContainer = "container" - paramRealm = "realm" -) - -type driver struct { - client azure.BlobStorageClient - container string -} - -type baseEmbed struct{ base.Base } - -// Driver is a storagedriver.StorageDriver implementation backed by -// Microsoft Azure Blob Storage Service. -type Driver struct{ baseEmbed } - -func init() { - factory.Register(driverName, &azureDriverFactory{}) -} - -type azureDriverFactory struct{} - -func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// FromParameters constructs a new Driver with a given parameters map. -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - accountName, ok := parameters[paramAccountName] - if !ok || fmt.Sprint(accountName) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountName) - } - - accountKey, ok := parameters[paramAccountKey] - if !ok || fmt.Sprint(accountKey) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) - } - - container, ok := parameters[paramContainer] - if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramContainer) - } - - realm, ok := parameters[paramRealm] - if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseURL - } - - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) -} - -// New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) - if err != nil { - return nil, err - } - - blobClient := api.GetBlobService() - - // Create registry container - if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { - return nil, err - } - - d := &driver{ - client: blobClient, - container: container} - return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil -} - -// Implement the storagedriver.StorageDriver interface. -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. 
-func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - blob, err := d.client.GetBlob(d.container, path) - if err != nil { - if is404(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - return ioutil.ReadAll(blob) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { - return err - } - if err := d.client.CreateBlockBlob(d.container, path); err != nil { - return err - } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) - return err -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if !ok { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - info, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - size := int64(info.ContentLength) - if offset >= size { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange) - if err != nil { - return nil, err - } - return resp, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { - if blobExists, err := d.client.BlobExists(d.container, path); err != nil { - return 0, err - } else if !blobExists { - err := d.client.CreateBlockBlob(d.container, path) - if err != nil { - return 0, err - } - } - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - zw := newZeroFillWriter(&bw) - return zw.Write(d.container, path, offset, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. 
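ReadStream above returns an empty reader, not an error, when the requested offset is at or past the end of the blob. A compact sketch of that offset rule over an in-memory byte slice:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// readAt mirrors the offset handling in ReadStream above: an offset at or
// past the end of the blob yields an empty reader rather than an error.
func readAt(blob []byte, offset int64) io.ReadCloser {
	if offset >= int64(len(blob)) {
		return ioutil.NopCloser(bytes.NewReader(nil))
	}
	return ioutil.NopCloser(bytes.NewReader(blob[offset:]))
}

func main() {
	rc := readAt([]byte("abcdef"), 4)
	defer rc.Close()
	rest, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", rest) // ef

	empty := readAt([]byte("abcdef"), 10)
	defer empty.Close()
	rest, _ = ioutil.ReadAll(empty)
	fmt.Println(len(rest)) // 0
}
```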
-func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // Check if the path is a blob - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if ok { - blob, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - mtim, err := time.Parse(http.TimeFormat, blob.LastModified) - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(blob.ContentLength), - ModTime: mtim, - IsDir: false, - }}, nil - } - - // Check if path is a virtual container - virtContainerPath := path - if !strings.HasSuffix(virtContainerPath, "/") { - virtContainerPath += "/" - } - blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Prefix: virtContainerPath, - MaxResults: 1, - }) - if err != nil { - return nil, err - } - if len(blobs.Blobs) > 0 { - // path is a virtual container - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - IsDir: true, - }}, nil - } - - // path is not a blob or virtual container - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - if path == "/" { - path = "" - } - - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return blobs, err - } - - list := directDescendants(blobs, path) - return list, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) - err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) - if err != nil { - if is404(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err - } - - return d.client.DeleteBlob(d.container, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - ok, err := d.client.DeleteBlobIfExists(d.container, path) - if err != nil { - return err - } - if ok { - return nil // was a blob and deleted, return - } - - // Not a blob, see if path is a virtual container with blobs - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return err - } - - for _, b := range blobs { - if err = d.client.DeleteBlob(d.container, b); err != nil { - return err - } - } - - if len(blobs) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - return nil -} - -// URLFor returns a publicly accessible URL for the blob stored at the given path -// for the specified duration by making use of Azure Storage Shared Access Signatures (SAS). -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration - expires, ok := options["expiry"] - if ok { - t, ok := expires.(time.Time) - if ok { - expiresTime = t - } - } - return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") -} - -// directDescendants will find direct descendants (blobs or virtual containers) -// from a list of blob paths and will return their full paths.
Elements in blobs -// list must be prefixed with a "/" and -// -// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is -// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} -func directDescendants(blobs []string, prefix string) []string { - if !strings.HasPrefix(prefix, "/") { // add trailing '/' - prefix = "/" + prefix - } - if !strings.HasSuffix(prefix, "/") { // containerify the path - prefix += "/" - } - - out := make(map[string]bool) - for _, b := range blobs { - if strings.HasPrefix(b, prefix) { - rel := b[len(prefix):] - c := strings.Count(rel, "/") - if c == 0 { - out[b] = true - } else { - out[prefix+rel[:strings.Index(rel, "/")]] = true - } - } - } - - var keys []string - for k := range out { - keys = append(keys, k) - } - return keys -} - -func (d *driver) listBlobs(container, virtPath string) ([]string, error) { - if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path - virtPath += "/" - } - - out := []string{} - marker := "" - for { - resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Marker: marker, - Prefix: virtPath, - }) - - if err != nil { - return out, err - } - - for _, b := range resp.Blobs { - out = append(out, b.Name) - } - - if len(resp.Blobs) == 0 || resp.NextMarker == "" { - break - } - marker = resp.NextMarker - } - return out, nil -} - -func is404(err error) bool { - e, ok := err.(azure.AzureStorageServiceError) - return ok && e.StatusCode == http.StatusNotFound -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go deleted file mode 100644 index 1c1df899..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go +++ /dev/null @@ -1,24 +0,0 @@ -package azure - -import ( - "fmt" - "io" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// azureBlockStorage is adaptor between azure.BlobStorageClient and -// blockStorage interface. -type azureBlockStorage struct { - azure.BlobStorageClient -} - -func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) -} - -func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { - a := azureBlockStorage{} - a.BlobStorageClient = b - return a -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go deleted file mode 100644 index 776c7cd5..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go +++ /dev/null @@ -1,60 +0,0 @@ -package azure - -import ( - "encoding/base64" - "fmt" - "math/rand" - "sync" - "time" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type blockIDGenerator struct { - pool map[string]bool - r *rand.Rand - m sync.Mutex -} - -// Generate returns an unused random block id and adds the generated ID -// to list of used IDs so that the same block name is not used again. 
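// Editor's sketch (illustrative; not part of the vendored file): typical use
// of the generator when adding blocks to an existing blob. Feed seeds the
// pool with IDs that are already present so Generate cannot collide with
// them (client, container, blob and data are assumed to be in scope):
//
//	gen := newBlockIDGenerator()
//	blocks, _ := client.GetBlockList(container, blob, azure.BlockListTypeAll)
//	gen.Feed(blocks)
//	id := gen.Generate() // fixed-length, base64-encoded, unused ID
//	err := client.PutBlock(container, blob, id, data)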
-func (b *blockIDGenerator) Generate() string { - b.m.Lock() - defer b.m.Unlock() - - var id string - for { - id = toBlockID(int(b.r.Int())) - if !b.exists(id) { - break - } - } - b.pool[id] = true - return id -} - -func (b *blockIDGenerator) exists(id string) bool { - _, used := b.pool[id] - return used -} - -func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { - b.m.Lock() - defer b.m.Unlock() - - for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { - b.pool[bl.Name] = true - } -} - -func newBlockIDGenerator() *blockIDGenerator { - return &blockIDGenerator{ - pool: make(map[string]bool), - r: rand.New(rand.NewSource(time.Now().UnixNano()))} -} - -// toBlockID converts the given integer to a base64-encoded block ID of a fixed length. -func toBlockID(i int) string { - s := fmt.Sprintf("%029d", i) // zero-pad so all block IDs have the same length - return base64.StdEncoding.EncodeToString([]byte(s)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go deleted file mode 100644 index f18692d0..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go +++ /dev/null @@ -1,208 +0,0 @@ -package azure - -import ( - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// blockStorage is the interface required from a block storage service -// client implementation -type blockStorage interface { - CreateBlockBlob(container, blob string) error - GetBlob(container, blob string) (io.ReadCloser, error) - GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) - PutBlock(container, blob, blockID string, chunk []byte) error - GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) - PutBlockList(container, blob string, blocks []azure.Block) error -} - -// randomBlobWriter enables random access semantics on Azure block blobs -// by allowing chunks of arbitrary length to be written at arbitrary offsets -// within the blob. Normally, Azure Blob Storage does not support random -// access semantics on block blobs; however, this writer can download, split and -// reupload the overlapping blocks, discarding those being overwritten entirely. -type randomBlobWriter struct { - bs blockStorage - blockSize int -} - -func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { - return randomBlobWriter{bs: bs, blockSize: blockSize} -} - -// WriteBlobAt writes the given chunk to the specified position of an existing blob. -// The offset must be less than or equal to the size of the blob.
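// Editor's note (illustrative; not part of the vendored file): a worked
// example of the splitting performed below. Take a blob made of two
// committed 4 MB blocks (size 8 MB) and a call such as
//
//	n, err := bw.WriteBlobAt(container, blob, 6*1024*1024, bytes.NewReader(oneMB))
//
// where oneMB is an assumed 1 MB byte slice. blocksLeftSide keeps block 1
// whole and re-uploads bytes [4 MB, 6 MB) of block 2; blocksRightSide
// re-uploads bytes [7 MB, 8 MB); PutBlockList then commits [block1,
// left-split, new-chunk, right-split], leaving the blob size at 8 MB.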
-func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { - rand := newBlockIDGenerator() - - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - rand.Feed(blocks) // load existing block IDs - - // Check for write offset for existing blob - size := getBlobSize(blocks) - if offset < 0 || offset > size { - return 0, fmt.Errorf("wrong offset for Write: %v", offset) - } - - // Upload the new chunk as blocks - blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) - if err != nil { - return 0, err - } - - // For non-append operations, existing blocks may need to be split - if offset != size { - // Split the block on the left end (if any) - leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) - if err != nil { - return 0, err - } - blockList = append(leftBlocks, blockList...) - - // Split the block on the right end (if any) - rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) - if err != nil { - return 0, err - } - blockList = append(blockList, rightBlocks...) - } else { - // Use existing block list - var existingBlocks []azure.Block - for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } - blockList = append(existingBlocks, blockList...) - } - // Put block list - return nn, r.bs.PutBlockList(container, blob, blockList) -} - -func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - return getBlobSize(blocks), nil -} - -// writeChunkToBlocks writes the given chunk to one or multiple blocks within the -// specified blob and returns their block representations. Those blocks are not yet committed. -func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { - var newBlocks []azure.Block - var nn int64 - - // Read chunks of at most size N except the last chunk to - // maximize block size and minimize block count. - buf := make([]byte, r.blockSize) - for { - n, err := io.ReadFull(chunk, buf) - if err == io.EOF { - break - } - nn += int64(n) - data := buf[:n] - blockID := rand.Generate() - if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { - return newBlocks, nn, err - } - newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) - } - return newBlocks, nn, nil -} - -// blocksLeftSide returns the blocks that are going to be at the left side of -// the writeOffset: [0, writeOffset) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed.
-func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { - var left []azure.Block - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return left, err - } - - o := writeOffset - elapsed := int64(0) - for _, v := range bx.CommittedBlocks { - blkSize := int64(v.Size) - if o >= blkSize { // use existing block - left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - o -= blkSize - elapsed += blkSize - } else if o > 0 { // current block needs to be split - start := elapsed - size := o - part, err := r.bs.GetSectionReader(container, blob, start, size) - if err != nil { - return left, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return left, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return left, err - } - left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - break - } - } - return left, nil -} - -// blocksRightSide returns the blocks that are going to be at the right side of -// the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. -func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { - var right []azure.Block - - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return nil, err - } - - re := writeOffset + chunkSize - 1 // right end of written chunk - var elapsed int64 - for _, v := range bx.CommittedBlocks { - var ( - bs = elapsed // left end of current block - be = elapsed + int64(v.Size) - 1 // right end of current block - ) - - if bs > re { // take the block as is - right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } else if be > re { // current block needs to be split - part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) - if err != nil { - return right, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return right, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return right, err - } - right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - } - elapsed += int64(v.Size) - } - return right, nil -} - -func getBlobSize(blocks azure.BlockListResponse) int64 { - var n int64 - for _, v := range blocks.CommittedBlocks { - n += int64(v.Size) - } - return n -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go deleted file mode 100644 index 095489d2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go +++ /dev/null @@ -1,49 +0,0 @@ -package azure - -import ( - "bytes" - "io" -) - -type blockBlobWriter interface { - GetSize(container, blob string) (int64, error) - WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) -} - -// zeroFillWriter enables writing to an offset outside a block blob's size -// by offering the chunk to the underlying writer as contiguous data, with -// the gap in between filled with NUL (zero) bytes.
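// Editor's sketch (illustrative; not part of the vendored file): writing
// 1 KB at offset 10 KB into a 4 KB blob via the Write method below pads the
// 6 KB gap with zeros (bw is a randomBlobWriter from this package, oneKB an
// assumed 1 KB slice):
//
//	zw := newZeroFillWriter(&bw)
//	n, err := zw.Write(container, blob, 10*1024, bytes.NewReader(oneKB))
//	// n == 1024: the zero padding is not counted in the returned total.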
-type zeroFillWriter struct { - blockBlobWriter -} - -func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { - w := zeroFillWriter{} - w.blockBlobWriter = b - return w -} - -// Write writes the given chunk to the specified existing blob even when the -// offset is beyond the blob's size. The gap is filled with zeros. The -// returned byte count does not include the zeros written. -func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { - size, err := z.blockBlobWriter.GetSize(container, blob) - if err != nil { - return 0, err - } - - var reader io.Reader - var zeroPadding int64 - if offset <= size { - reader = chunk - } else { - zeroPadding = offset - size - offset = size // adjust offset to be the append index - zeros := bytes.NewReader(make([]byte, zeroPadding)) - reader = io.MultiReader(zeros, chunk) - } - - nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) - nn -= zeroPadding - return nn, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go deleted file mode 100644 index c816d2d6..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go +++ /dev/null @@ -1,202 +0,0 @@ -// Package base provides a base implementation of the storage driver that can -// be used to implement common checks. The goal is to increase the amount of -// code sharing. -// -// The canonical approach to using this package is to embed Base in the exported -// driver struct such that calls are proxied through this implementation. First, -// declare the internal driver, as follows: -// -// type driver struct { ... internal ...} -// -// The resulting type should implement StorageDriver such that it can be the -// target of a Base struct. The exported type can then be declared as follows: -// -// type Driver struct { -// Base -// } -// -// Because Driver embeds Base, it effectively implements Base. If the driver -// needs to intercept a call before it goes to base, Driver should implement -// that method. Effectively, Driver can intercept calls on the way in, while -// driver implements the actual logic. -// -// To further shield the embed from other packages, it is recommended to -// employ a private embed struct: -// -// type baseEmbed struct { -// base.Base -// } -// -// Then, declare driver to embed baseEmbed, rather than Base directly: -// -// type Driver struct { -// baseEmbed -// } -// -// The type now implements StorageDriver, proxying through Base, without -// exporting an unnecessary field. -package base - -import ( - "io" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// Base provides a wrapper around a storagedriver implementation that provides -// common path and bounds checking.
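// Editor's sketch (illustrative; not part of the vendored file): the
// embedding pattern from the package comment above, spelled out for a
// hypothetical driver; the filesystem and inmemory drivers later in this
// tree follow exactly this shape:
//
//	type driver struct{ root string } // implements StorageDriver
//
//	type baseEmbed struct{ base.Base }
//
//	type Driver struct{ baseEmbed }
//
//	func New(root string) *Driver {
//		return &Driver{baseEmbed{base.Base{StorageDriver: &driver{root: root}}}}
//	}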
-type Base struct { - storagedriver.StorageDriver -} - -// Format errors received from the storage driver -func (base *Base) setDriverName(e error) error { - switch actual := e.(type) { - case nil: - return nil - case storagedriver.ErrUnsupportedMethod: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.PathNotFoundError: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.InvalidPathError: - actual.DriverName = base.StorageDriver.Name() - return actual - case storagedriver.InvalidOffsetError: - actual.DriverName = base.StorageDriver.Name() - return actual - default: - storageError := storagedriver.Error{ - DriverName: base.StorageDriver.Name(), - Enclosed: e, - } - - return storageError - } -} - -// GetContent wraps GetContent of underlying storage driver. -func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.GetContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - b, e := base.StorageDriver.GetContent(ctx, path) - return b, base.setDriverName(e) -} - -// PutContent wraps PutContent of underlying storage driver. -func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.PutContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) -} - -// ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - rc, e := base.StorageDriver.ReadStream(ctx, path, offset) - return rc, base.setDriverName(e) -} - -// WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) - return i64, base.setDriverName(e) -} - -// Stat wraps Stat of underlying storage driver. 
-func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.Stat(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - fi, e := base.StorageDriver.Stat(ctx, path) - return fi, base.setDriverName(e) -} - -// List wraps List of underlying storage driver. -func (base *Base) List(ctx context.Context, path string) ([]string, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.List(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) && path != "/" { - return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - str, e := base.StorageDriver.List(ctx, path) - return str, base.setDriverName(e) -} - -// Move wraps Move of underlying storage driver. -func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath) - - if !storagedriver.PathRegexp.MatchString(sourcePath) { - return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} - } else if !storagedriver.PathRegexp.MatchString(destPath) { - return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) -} - -// Delete wraps Delete of underlying storage driver. -func (base *Base) Delete(ctx context.Context, path string) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.Delete(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - return base.setDriverName(base.StorageDriver.Delete(ctx, path)) -} - -// URLFor wraps URLFor of underlying storage driver.
-func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.URLFor(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} - } - - str, e := base.StorageDriver.URLFor(ctx, path, options) - return str, base.setDriverName(e) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go deleted file mode 100644 index e84f0026..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go +++ /dev/null @@ -1,55 +0,0 @@ -package factory - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// driverFactories stores an internal mapping between storage driver names and their respective -// factories -var driverFactories = make(map[string]StorageDriverFactory) - -// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces -// Storage drivers should call Register() with a factory to make the driver available by name -type StorageDriverFactory interface { - // Create returns a new storagedriver.StorageDriver with the given parameters - // Parameters will vary by driver and may be ignored - // Each parameter key must only consist of lowercase letters and numbers - Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) -} - -// Register makes a storage driver available by the provided name. -// If Register is called twice with the same name or if driver factory is nil, it panics. -func Register(name string, factory StorageDriverFactory) { - if factory == nil { - panic("Must not provide nil StorageDriverFactory") - } - _, registered := driverFactories[name] - if registered { - panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) - } - - driverFactories[name] = factory -} - -// Create a new storagedriver.StorageDriver with the given name and -// parameters. To use a driver, the StorageDriverFactory must first be -// registered with the given name. If no drivers are found, an -// InvalidStorageDriverError is returned -func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - driverFactory, ok := driverFactories[name] - if !ok { - return nil, InvalidStorageDriverError{name} - } - return driverFactory.Create(parameters) -} - -// InvalidStorageDriverError records an attempt to construct an unregistered storage driver -type InvalidStorageDriverError struct { - Name string -} - -func (err InvalidStorageDriverError) Error() string { - return fmt.Sprintf("StorageDriver not registered: %s", err.Name) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go deleted file mode 100644 index e5064029..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go +++ /dev/null @@ -1,79 +0,0 @@ -package driver - -import "time" - -// FileInfo returns information about a given path. Inspired by os.FileInfo, -// it elides the base name method for a full path instead. -type FileInfo interface { - // Path provides the full path of the target of this file info. 
- Path() string - - // Size returns current length in bytes of the file. The return value can - // be used to write to the end of the file at path. The value is - // meaningless if IsDir returns true. - Size() int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime() time.Time - - // IsDir returns true if the path is a directory. - IsDir() bool -} - -// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal -// should only be used by storagedriver implementations. They should be moved -// to a "driver" package, similar to database/sql. - -// FileInfoFields provides the exported fields for implementing the FileInfo -// interface in storagedriver implementations. It should be used with -// InternalFileInfo. -type FileInfoFields struct { - // Path provides the full path of the target of this file info. - Path string - - // Size is the current length in bytes of the file. The value of this field - // can be used to write to the end of the file at path. The value is - // meaningless if IsDir is set to true. - Size int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime time.Time - - // IsDir returns true if the path is a directory. - IsDir bool -} - -// FileInfoInternal implements the FileInfo interface. This should only be -// used by storagedriver implementations that don't have a specialized -// FileInfo type. -type FileInfoInternal struct { - FileInfoFields -} - -var _ FileInfo = FileInfoInternal{} -var _ FileInfo = &FileInfoInternal{} - -// Path provides the full path of the target of this file info. -func (fi FileInfoInternal) Path() string { - return fi.FileInfoFields.Path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi FileInfoInternal) Size() int64 { - return fi.FileInfoFields.Size -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi FileInfoInternal) ModTime() time.Time { - return fi.FileInfoFields.ModTime -} - -// IsDir returns true if the path is a directory.
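// Editor's sketch (illustrative; not part of the vendored file): a driver's
// Stat typically fills FileInfoFields and returns the wrapper by value, as
// the azure driver earlier in this tree does (path, size and modTime are
// assumed to be in scope):
//
//	return FileInfoInternal{FileInfoFields: FileInfoFields{
//		Path:    path,
//		Size:    size,
//		ModTime: modTime,
//		IsDir:   false,
//	}}, nil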
-func (fi FileInfoInternal) IsDir() bool { - return fi.FileInfoFields.IsDir -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go deleted file mode 100644 index 5b495818..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go +++ /dev/null @@ -1,288 +0,0 @@ -package filesystem - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "filesystem" -const defaultRootDirectory = "/var/lib/registry" - -func init() { - factory.Register(driverName, &filesystemDriverFactory{}) -} - -// filesystemDriverFactory implements the factory.StorageDriverFactory interface -type filesystemDriverFactory struct{} - -func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters), nil -} - -type driver struct { - rootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local -// filesystem. All provided paths will be subpaths of the RootDirectory. -type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Optional Parameters: -// - rootdirectory -func FromParameters(parameters map[string]interface{}) *Driver { - var rootDirectory = defaultRootDirectory - if parameters != nil { - rootDir, ok := parameters["rootdirectory"] - if ok { - rootDirectory = fmt.Sprint(rootDir) - } - } - return New(rootDirectory) -} - -// New constructs a new Driver with a given rootDirectory -func New(rootDirectory string) *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - rootDirectory: rootDirectory, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { - if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return os.Truncate(d.fullPath(subPath), int64(len(contents))) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
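// Editor's sketch (illustrative; not part of the vendored file): from a
// caller's perspective this driver can be built directly or through the
// factory registered in init above:
//
//	d := filesystem.New("/var/lib/registry")
//	// or, equivalently, via the factory:
//	d2, err := factory.Create("filesystem",
//		map[string]interface{}{"rootdirectory": "/var/lib/registry"})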
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return nil, err - } - - seekPos, err := file.Seek(int64(offset), os.SEEK_SET) - if err != nil { - file.Close() - return nil, err - } else if seekPos < int64(offset) { - file.Close() - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return file, nil -} - -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { - // TODO(stevvooe): This needs to be a requirement. - // if !path.IsAbs(subPath) { - // return fmt.Errorf("absolute path required: %q", subPath) - // } - - fullPath := d.fullPath(subPath) - parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0777); err != nil { - return 0, err - } - - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - // TODO(stevvooe): A few missing conditions in storage driver: - // 1. What if the path is already a directory? - // 2. Should number 1 be exposed explicitly in storagedriver? - // 2. Can this path not exist, even if we create above? - return 0, err - } - defer fp.Close() - - nn, err = fp.Seek(offset, os.SEEK_SET) - if err != nil { - return 0, err - } - - if nn != offset { - return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) - } - - return io.Copy(fp, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { - fullPath := d.fullPath(subPath) - - fi, err := os.Stat(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - - return nil, err - } - - return fileInfo{ - path: subPath, - FileInfo: fi, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { - fullPath := d.fullPath(subPath) - - dir, err := os.Open(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - return nil, err - } - - defer dir.Close() - - fileNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(fileNames)) - for _, fileName := range fileNames { - keys = append(keys, path.Join(subPath, fileName)) - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - source := d.fullPath(sourcePath) - dest := d.fullPath(destPath) - - if _, err := os.Stat(source); os.IsNotExist(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - - if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { - return err - } - - err := os.Rename(source, dest) - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(ctx context.Context, subPath string) error { - fullPath := d.fullPath(subPath) - - _, err := os.Stat(fullPath) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - return storagedriver.PathNotFoundError{Path: subPath} - } - - err = os.RemoveAll(fullPath) - return err -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} - -// fullPath returns the absolute path of a key within the Driver's storage. -func (d *driver) fullPath(subPath string) string { - return path.Join(d.rootDirectory, subPath) -} - -type fileInfo struct { - os.FileInfo - path string -} - -var _ storagedriver.FileInfo = fileInfo{} - -// Path provides the full path of the target of this file info. -func (fi fileInfo) Path() string { - return fi.path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi fileInfo) Size() int64 { - if fi.IsDir() { - return 0 - } - - return fi.FileInfo.Size() -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi fileInfo) ModTime() time.Time { - return fi.FileInfo.ModTime() -} - -// IsDir returns true if the path is a directory. -func (fi fileInfo) IsDir() bool { - return fi.FileInfo.IsDir() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go deleted file mode 100644 index 0f23ea78..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package gcs implements the Google Cloud Storage driver backend. Support can be -// enabled by including the "include_gcs" build tag. -package gcs diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go deleted file mode 100644 index 0e3480f2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go +++ /dev/null @@ -1,670 +0,0 @@ -// Package gcs provides a storagedriver.StorageDriver implementation to -// store blobs in Google cloud storage. -// -// This package leverages the google.golang.org/cloud/storage client library -//for interfacing with gcs. -// -// Because gcs is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that gcs guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. 
-// -// +build include_gcs - -package gcs - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "net/url" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/api/googleapi" - storageapi "google.golang.org/api/storage/v1" - "google.golang.org/cloud" - "google.golang.org/cloud/storage" - - ctx "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "gcs" -const dummyProjectID = "" - -// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set -type driverParameters struct { - bucket string - config *jwt.Config - email string - privateKey []byte - client *http.Client - rootDirectory string -} - -func init() { - factory.Register(driverName, &gcsDriverFactory{}) -} - -// gcsDriverFactory implements the factory.StorageDriverFactory interface -type gcsDriverFactory struct{} - -// Create StorageDriver from parameters -func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// driver is a storagedriver.StorageDriver implementation backed by GCS -// Objects are stored at absolute keys in the provided bucket. -type driver struct { - client *http.Client - bucket string - email string - privateKey []byte - rootDirectory string -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - bucket -func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - var ts oauth2.TokenSource - jwtConf := new(jwt.Config) - if keyfile, ok := parameters["keyfile"]; ok { - jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) - if err != nil { - return nil, err - } - jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) - if err != nil { - return nil, err - } - ts = jwtConf.TokenSource(context.Background()) - } else { - var err error - ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) - if err != nil { - return nil, err - } - - } - - params := driverParameters{ - bucket: fmt.Sprint(bucket), - rootDirectory: fmt.Sprint(rootDirectory), - email: jwtConf.Email, - privateKey: jwtConf.PrivateKey, - client: oauth2.NewClient(context.Background(), ts), - } - - return New(params) -} - -// New constructs a new driver -func New(params driverParameters) (storagedriver.StorageDriver, error) { - rootDirectory := strings.Trim(params.rootDirectory, "/") - if rootDirectory != "" { - rootDirectory += "/" - } - d := &driver{ - bucket: params.bucket, - rootDirectory: rootDirectory, - email: params.email, - privateKey: params.privateKey, - client: params.client, - } - - return &base.Base{ - StorageDriver: d, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -// This should primarily be used for small objects. 
-func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(context, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -// This should primarily be used for small objects. -func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - _, err := wc.Write(contents) - return err -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" -// with a given byte offset. -// May be used to resume reading a stream by providing a nonzero offset. -func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { - name := d.pathToKey(path) - - // copied from google.golang.org/cloud/storage#NewReader : - // to set the additional "Range" header - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", d.bucket, name), - } - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - if offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) - } - res, err := d.client.Do(req) - if err != nil { - return nil, err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, storagedriver.PathNotFoundError{Path: path} - } - if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { - res.Body.Close() - obj, err := storageStatObject(d.context(context), d.bucket, name) - if err != nil { - return nil, err - } - if offset == int64(obj.Size) { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - if res.StatusCode < 200 || res.StatusCode > 299 { - res.Body.Close() - return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) - } - return res.Body, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a -// location designated by the given path. -// May be used to resume writing a stream by providing a nonzero offset. -// The offset must be no larger than the CurrentSize for this path. 
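// Editor's note (illustrative; not part of the vendored file): GCS objects
// cannot be rewritten in place, so WriteStream below appends by uploading
// the new bytes as a temporary "#part-N#" object and issuing a Compose
// request that concatenates the existing object with that part. Compose
// allows at most 1024 components, hence the writeCompletely fallback once
// ComponentCount reaches 1023. Offset bookkeeping, with objSize the current
// object size:
//
//	offset <  objSize: skip(reader, objSize-offset) discards re-sent bytes
//	offset == objSize: plain append
//	offset >  objSize: writeZeros(wc, offset-objSize) fills the gap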
-func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - if offset == 0 { - return d.writeCompletely(context, path, 0, reader) - } - - service, err := storageapi.New(d.client) - if err != nil { - return 0, err - } - objService := storageapi.NewObjectsService(service) - var obj *storageapi.Object - err = retry(5, func() error { - o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() - obj = o - return err - }) - // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) - if err != nil { - return 0, err - } - - // cannot append more chunks, so redo from scratch - if obj.ComponentCount >= 1023 { - return d.writeCompletely(context, path, offset, reader) - } - - // skip from reader - objSize := int64(obj.Size) - nn, err := skip(reader, objSize-offset) - if err != nil { - return nn, err - } - - // Size <= offset - partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) - gcsContext := d.context(context) - wc := storage.NewWriter(gcsContext, d.bucket, partName) - wc.ContentType = "application/octet-stream" - - if objSize < offset { - err = writeZeros(wc, offset-objSize) - if err != nil { - wc.CloseWithError(err) - return nn, err - } - } - n, err := io.Copy(wc, reader) - if err != nil { - wc.CloseWithError(err) - return nn, err - } - err = wc.Close() - if err != nil { - return nn, err - } - // wc was closed successfully, so the temporary part exists, schedule it for deletion at the end - // of the function - defer storageDeleteObject(gcsContext, d.bucket, partName) - - req := &storageapi.ComposeRequest{ - Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, - SourceObjects: []*storageapi.ComposeRequestSourceObjects{ - { - Name: obj.Name, - Generation: obj.Generation, - }, { - Name: partName, - Generation: wc.Object().Generation, - }}, - } - - err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) - if err == nil { - nn = nn + n - } - - return nn, err -} - -type request func() error - -func retry(maxTries int, req request) error { - backoff := time.Second - var err error - for i := 0; i < maxTries; i++ { - err = req() - if err == nil { - return nil - } - - status, ok := err.(*googleapi.Error) - if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { - return err - } - - time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) - if i <= 4 { - backoff = backoff * 2 - } - } - return err -} - -func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - - // Copy the first offset bytes of the existing contents - // (padded with zeros if needed) into the writer - if offset > 0 { - existing, err := d.ReadStream(context, path, 0) - if err != nil { - return 0, err - } - defer existing.Close() - n, err := io.CopyN(wc, existing, offset) - if err == io.EOF { - err = writeZeros(wc, offset-n) - } - if err != nil { - return 0, err - } - } - return io.Copy(wc, reader) -} - -func skip(reader io.Reader, count int64) (int64, error) { - if count <= 0 { - return 0, nil - } - return io.CopyN(ioutil.Discard, reader, count) -} - -func writeZeros(wc io.Writer,
count int64) error { - buf := make([]byte, 32*1024) - for count > 0 { - size := cap(buf) - if int64(size) > count { - size = int(count) - } - n, err := wc.Write(buf[0:size]) - if err != nil { - return err - } - count = count - int64(n) - } - return nil -} - -// Stat retrieves the FileInfo for the given path, including the current -// size in bytes and the creation time. -func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { - var fi storagedriver.FileInfoFields - // try to get as file - gcsContext := d.context(context) - obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) - if err == nil { - fi = storagedriver.FileInfoFields{ - Path: path, - Size: obj.Size, - ModTime: obj.Updated, - IsDir: false, - } - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } - // try to get as folder - dirpath := d.pathToDirKey(path) - - var query *storage.Query - query = &storage.Query{} - query.Prefix = dirpath - query.MaxResults = 1 - - objects, err := storageListObjects(gcsContext, d.bucket, query) - if err != nil { - return nil, err - } - if len(objects.Results) < 1 { - return nil, storagedriver.PathNotFoundError{Path: path} - } - fi = storagedriver.FileInfoFields{ - Path: path, - IsDir: true, - } - obj = objects.Results[0] - if obj.Name == dirpath { - fi.Size = obj.Size - fi.ModTime = obj.Updated - } - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the -// given path. -func (d *driver) List(context ctx.Context, path string) ([]string, error) { - var query *storage.Query - query = &storage.Query{} - query.Delimiter = "/" - query.Prefix = d.pathToDirKey(path) - list := make([]string, 0, 64) - for { - objects, err := storageListObjects(d.context(context), d.bucket, query) - if err != nil { - return nil, err - } - for _, object := range objects.Results { - // GCS does not guarantee strong consistency between - // DELETE and LIST operations. Check that the object is not deleted, - // and filter out any objects with a non-zero time-deleted - if object.Deleted.IsZero() { - name := object.Name - // Ignore objects with names that end with '#' (these are uploaded parts) - if name[len(name)-1] != '#' { - name = d.keyToPath(name) - list = append(list, name) - } - } - } - for _, subpath := range objects.Prefixes { - subpath = d.keyToPath(subpath) - list = append(list, subpath) - } - query = objects.Next - if query == nil { - break - } - } - if path != "/" && len(list) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in Google Cloud Storage. - return nil, storagedriver.PathNotFoundError{Path: path} - } - return list, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the -// original object.
-func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { - prefix := d.pathToDirKey(sourcePath) - gcsContext := d.context(context) - keys, err := d.listAll(gcsContext, prefix) - if err != nil { - return err - } - if len(keys) > 0 { - destPrefix := d.pathToDirKey(destPath) - copies := make([]string, 0, len(keys)) - sort.Strings(keys) - var err error - for _, key := range keys { - dest := destPrefix + key[len(prefix):] - _, err = storageCopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) - if err == nil { - copies = append(copies, dest) - } else { - break - } - } - // if an error occurred, attempt to clean up the copies made - if err != nil { - for i := len(copies) - 1; i >= 0; i-- { - _ = storageDeleteObject(gcsContext, d.bucket, copies[i]) - } - return err - } - // delete originals - for i := len(keys) - 1; i >= 0; i-- { - err2 := storageDeleteObject(gcsContext, d.bucket, keys[i]) - if err2 != nil { - err = err2 - } - } - return err - } - _, err = storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) - if err != nil { - if status, ok := err.(*googleapi.Error); ok { - if status.Code == http.StatusNotFound { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - } - return err - } - return storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) -} - -// listAll recursively lists all names of objects stored at "prefix" and its subpaths. -func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { - list := make([]string, 0, 64) - query := &storage.Query{} - query.Prefix = prefix - query.Versions = false - for { - objects, err := storageListObjects(d.context(context), d.bucket, query) - if err != nil { - return nil, err - } - for _, obj := range objects.Results { - // GCS does not guarantee strong consistency between - // DELETE and LIST operations. Check that the object is not deleted, - // and filter out any objects with a non-zero time-deleted - if obj.Deleted.IsZero() { - list = append(list, obj.Name) - } - } - query = objects.Next - if query == nil { - break - } - } - return list, nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(context ctx.Context, path string) error { - prefix := d.pathToDirKey(path) - gcsContext := d.context(context) - keys, err := d.listAll(gcsContext, prefix) - if err != nil { - return err - } - if len(keys) > 0 { - sort.Sort(sort.Reverse(sort.StringSlice(keys))) - for _, key := range keys { - err := storageDeleteObject(gcsContext, d.bucket, key) - // GCS only guarantees eventual consistency, so listAll might return - // paths that no longer exist.
If this happens, just ignore any not - // found error - if status, ok := err.(*googleapi.Error); ok { - if status.Code == http.StatusNotFound { - err = nil - } - } - if err != nil { - return err - } - } - return nil - } - err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) - if err != nil { - if status := err.(*googleapi.Error); status != nil { - if status.Code == http.StatusNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - } - } - return err -} - -func storageDeleteObject(context context.Context, bucket string, name string) error { - return retry(5, func() error { - return storage.DeleteObject(context, bucket, name) - }) -} - -func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { - var obj *storage.Object - err := retry(5, func() error { - var err error - obj, err = storage.StatObject(context, bucket, name) - return err - }) - return obj, err -} - -func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { - var objs *storage.Objects - err := retry(5, func() error { - var err error - objs, err = storage.ListObjects(context, bucket, q) - return err - }) - return objs, err -} - -func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { - var obj *storage.Object - err := retry(5, func() error { - var err error - obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) - return err - }) - return obj, err -} - -// URLFor returns a URL which may be used to retrieve the content stored at -// the given path, possibly using the given options. -// Returns ErrUnsupportedMethod if this driver has no privateKey -func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { - if d.privateKey == nil { - return "", storagedriver.ErrUnsupportedMethod{} - } - - name := d.pathToKey(path) - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - opts := &storage.SignedURLOptions{ - GoogleAccessID: d.email, - PrivateKey: d.privateKey, - Method: methodString, - Expires: expiresTime, - } - return storage.SignedURL(d.bucket, name, opts) -} - -func (d *driver) context(context ctx.Context) context.Context { - return cloud.WithContext(context, dummyProjectID, d.client) -} - -func (d *driver) pathToKey(path string) string { - return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") -} - -func (d *driver) pathToDirKey(path string) string { - return d.pathToKey(path) + "/" -} - -func (d *driver) keyToPath(key string) string { - return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go deleted file mode 100644 index b5735c0a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go +++ /dev/null @@ -1,262 +0,0 @@ -package inmemory - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - 
"sync" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "inmemory" - -func init() { - factory.Register(driverName, &inMemoryDriverFactory{}) -} - -// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. -type inMemoryDriverFactory struct{} - -func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(), nil -} - -type driver struct { - root *dir - mutex sync.RWMutex -} - -// baseEmbed allows us to hide the Base embed. -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local map. -// Intended solely for example and testing purposes. -type Driver struct { - baseEmbed // embedded, hidden base driver. -} - -var _ storagedriver.StorageDriver = &Driver{} - -// New constructs a new Driver. -func New() *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - root: &dir{ - common: common{ - p: "/", - mod: time.Now(), - }, - }, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface. - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - return ioutil.ReadAll(rc) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - f, err := d.root.mkfile(p) - if err != nil { - // TODO(stevvooe): Again, we need to clarify when this is not a - // directory in StorageDriver API. - return fmt.Errorf("not a file") - } - - f.truncate() - f.WriteAt(contents, 0) - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - path = normalize(path) - found := d.root.find(path) - - if found.path() != path { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - if found.isdir() { - return nil, fmt.Errorf("%q is a directory", path) - } - - return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { - d.mutex.Lock() - defer d.mutex.Unlock() - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - normalized := normalize(path) - - f, err := d.root.mkfile(normalized) - if err != nil { - return 0, fmt.Errorf("not a file") - } - - // Unlock while we are reading from the source, in case we are reading - // from the same mfs instance. This can be fixed by a more granular - // locking model. 
- d.mutex.Unlock() - d.mutex.RLock() // Take the readlock to block other writers. - var buf bytes.Buffer - - nn, err = buf.ReadFrom(reader) - if err != nil { - // TODO(stevvooe): This condition is odd and we may need to clarify: - // we've read nn bytes from reader but have written nothing to the - // backend. What is the correct return value? Really, the caller needs - // to know that the reader has been advanced and reattempting the - // operation is incorrect. - d.mutex.RUnlock() - d.mutex.Lock() - return nn, err - } - - d.mutex.RUnlock() - d.mutex.Lock() - f.WriteAt(buf.Bytes(), offset) - return nn, err -} - -// Stat returns info about the provided path. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - found := d.root.find(path) - - if found.path() != normalized { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - fi := storagedriver.FileInfoFields{ - Path: path, - IsDir: found.isdir(), - ModTime: found.modtime(), - } - - if !fi.IsDir { - fi.Size = int64(len(found.(*file).data)) - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - - found := d.root.find(normalized) - - if !found.isdir() { - return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... - } - - entries, err := found.(*dir).list(normalized) - - if err != nil { - switch err { - case errNotExists: - return nil, storagedriver.PathNotFoundError{Path: path} - case errIsNotDir: - return nil, fmt.Errorf("not a directory") - default: - return nil, err - } - } - - return entries, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) - - err := d.root.move(normalizedSrc, normalizedDst) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: destPath} - default: - return err - } -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(path) - - err := d.root.delete(normalized) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: path} - default: - return err - } -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
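The lock juggling in WriteStream above exists because the source reader may itself be backed by this same in-memory tree: draining it while holding the write lock would self-deadlock. A minimal sketch of the underlying buffer-before-locking idea, using a hypothetical store type rather than the driver's actual mfs:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// store is a toy stand-in for a mutex-guarded in-memory tree.
type store struct {
	mu   sync.RWMutex
	data []byte
}

// write drains the reader into a local buffer before taking the write lock,
// so a reader backed by this same store cannot deadlock against the writer.
func (s *store) write(r io.Reader) (int64, error) {
	var buf bytes.Buffer
	n, err := buf.ReadFrom(r) // no lock held while reading
	if err != nil {
		return n, err
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data = append(s.data[:0], buf.Bytes()...)
	return n, nil
}

func main() {
	s := &store{}
	n, _ := s.write(strings.NewReader("hello"))
	fmt.Println(n, string(s.data)) // 5 hello
}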
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go deleted file mode 100644 index cdefacfd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go +++ /dev/null @@ -1,338 +0,0 @@ -package inmemory - -import ( - "fmt" - "io" - "path" - "sort" - "strings" - "time" -) - -var ( - errExists = fmt.Errorf("exists") - errNotExists = fmt.Errorf("notexists") - errIsNotDir = fmt.Errorf("notdir") - errIsDir = fmt.Errorf("isdir") -) - -type node interface { - name() string - path() string - isdir() bool - modtime() time.Time -} - -// dir is the central type for the memory-based storagedriver. All operations -// are dispatched from a root dir. -type dir struct { - common - - // TODO(stevvooe): Use sorted slice + search. - children map[string]node -} - -var _ node = &dir{} - -func (d *dir) isdir() bool { - return true -} - -// add places the node n into dir d. -func (d *dir) add(n node) { - if d.children == nil { - d.children = make(map[string]node) - } - - d.children[n.name()] = n - d.mod = time.Now() -} - -// find searches for the node, given path q in dir. If the node is found, it -// will be returned. If the node is not found, the closet existing parent. If -// the node is found, the returned (node).path() will match q. -func (d *dir) find(q string) node { - q = strings.Trim(q, "/") - i := strings.Index(q, "/") - - if q == "" { - return d - } - - if i == 0 { - panic("shouldn't happen, no root paths") - } - - var component string - if i < 0 { - // No more path components - component = q - } else { - component = q[:i] - } - - child, ok := d.children[component] - if !ok { - // Node was not found. Return p and the current node. - return d - } - - if child.isdir() { - // traverse down! - q = q[i+1:] - return child.(*dir).find(q) - } - - return child -} - -func (d *dir) list(p string) ([]string, error) { - n := d.find(p) - - if n.path() != p { - return nil, errNotExists - } - - if !n.isdir() { - return nil, errIsNotDir - } - - var children []string - for _, child := range n.(*dir).children { - children = append(children, child.path()) - } - - sort.Strings(children) - return children, nil -} - -// mkfile or return the existing one. returns an error if it exists and is a -// directory. Essentially, this is open or create. -func (d *dir) mkfile(p string) (*file, error) { - n := d.find(p) - if n.path() == p { - if n.isdir() { - return nil, errIsDir - } - - return n.(*file), nil - } - - dirpath, filename := path.Split(p) - // Make any non-existent directories - n, err := d.mkdirs(dirpath) - if err != nil { - return nil, err - } - - dd := n.(*dir) - n = &file{ - common: common{ - p: path.Join(dd.path(), filename), - mod: time.Now(), - }, - } - - dd.add(n) - return n.(*file), nil -} - -// mkdirs creates any missing directory entries in p and returns the result. 
-func (d *dir) mkdirs(p string) (*dir, error) { - p = normalize(p) - - n := d.find(p) - - if !n.isdir() { - // Found something there - return nil, errIsNotDir - } - - if n.path() == p { - return n.(*dir), nil - } - - dd := n.(*dir) - - relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") - - if relative == "" { - return dd, nil - } - - components := strings.Split(relative, "/") - for _, component := range components { - d, err := dd.mkdir(component) - - if err != nil { - // This should actually never happen, since there are no children. - return nil, err - } - dd = d - } - - return dd, nil -} - -// mkdir creates a child directory under d with the given name. -func (d *dir) mkdir(name string) (*dir, error) { - if name == "" { - return nil, fmt.Errorf("invalid dirname") - } - - _, ok := d.children[name] - if ok { - return nil, errExists - } - - child := &dir{ - common: common{ - p: path.Join(d.path(), name), - mod: time.Now(), - }, - } - d.add(child) - d.mod = time.Now() - - return child, nil -} - -func (d *dir) move(src, dst string) error { - dstDirname, _ := path.Split(dst) - - dp, err := d.mkdirs(dstDirname) - if err != nil { - return err - } - - srcDirname, srcFilename := path.Split(src) - sp := d.find(srcDirname) - - if normalize(srcDirname) != normalize(sp.path()) { - return errNotExists - } - - spd, ok := sp.(*dir) - if !ok { - return errIsNotDir // paranoid. - } - - s, ok := spd.children[srcFilename] - if !ok { - return errNotExists - } - - delete(spd.children, srcFilename) - - switch n := s.(type) { - case *dir: - n.p = dst - case *file: - n.p = dst - } - - dp.add(s) - - return nil -} - -func (d *dir) delete(p string) error { - dirname, filename := path.Split(p) - parent := d.find(dirname) - - if normalize(dirname) != normalize(parent.path()) { - return errNotExists - } - - if _, ok := parent.(*dir).children[filename]; !ok { - return errNotExists - } - - delete(parent.(*dir).children, filename) - return nil -} - -// dump outputs a primitive directory structure to stdout. -func (d *dir) dump(indent string) { - fmt.Println(indent, d.name()+"/") - - for _, child := range d.children { - if child.isdir() { - child.(*dir).dump(indent + "\t") - } else { - fmt.Println(indent, child.name()) - } - - } -} - -func (d *dir) String() string { - return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) -} - -// file stores actual data in the fs tree. It acts like an open, seekable file -// where operations are conducted through ReadAt and WriteAt. Use it with -// SectionReader for the best effect. -type file struct { - common - data []byte -} - -var _ node = &file{} - -func (f *file) isdir() bool { - return false -} - -func (f *file) truncate() { - f.data = f.data[:0] -} - -func (f *file) sectionReader(offset int64) io.Reader { - return io.NewSectionReader(f, offset, int64(len(f.data))-offset) -} - -func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { - return copy(p, f.data[offset:]), nil -} - -func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { - off := int(offset) - if cap(f.data) < off+len(p) { - data := make([]byte, len(f.data), off+len(p)) - copy(data, f.data) - f.data = data - } - - f.mod = time.Now() - f.data = f.data[:off+len(p)] - - return copy(f.data[off:off+len(p)], p), nil -} - -func (f *file) String() string { - return fmt.Sprintf("&file{path: %q}", f.p) -} - -// common provides shared fields and methods for node implementations. 
-type common struct { - p string - mod time.Time -} - -func (c *common) name() string { - _, name := path.Split(c.p) - return name -} - -func (c *common) path() string { - return c.p -} - -func (c *common) modtime() time.Time { - return c.mod -} - -func normalize(p string) string { - return "/" + strings.Trim(p, "/") -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go deleted file mode 100644 index 31c00afc..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package middleware - cloudfront wrapper for storage libs -// N.B. currently only works with S3, not arbitrary sites -// -package middleware - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "time" - - "github.com/AdRoll/goamz/cloudfront" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" -) - -// cloudFrontStorageMiddleware provides an simple implementation of layerHandler that -// constructs temporary signed CloudFront URLs from the storagedriver layer URL, -// then issues HTTP Temporary Redirects to this CloudFront content URL. -type cloudFrontStorageMiddleware struct { - storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration -} - -var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} - -// newCloudFrontLayerHandler constructs and returns a new CloudFront -// LayerHandler implementation. -// Required options: baseurl, privatekey, keypairid -func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { - base, ok := options["baseurl"] - if !ok { - return nil, fmt.Errorf("No baseurl provided") - } - baseURL, ok := base.(string) - if !ok { - return nil, fmt.Errorf("baseurl must be a string") - } - pk, ok := options["privatekey"] - if !ok { - return nil, fmt.Errorf("No privatekey provided") - } - pkPath, ok := pk.(string) - if !ok { - return nil, fmt.Errorf("privatekey must be a string") - } - kpid, ok := options["keypairid"] - if !ok { - return nil, fmt.Errorf("No keypairid provided") - } - keypairID, ok := kpid.(string) - if !ok { - return nil, fmt.Errorf("keypairid must be a string") - } - - pkBytes, err := ioutil.ReadFile(pkPath) - if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) - } - - block, _ := pem.Decode([]byte(pkBytes)) - if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") - } - privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - - cf := cloudfront.New(baseURL, privateKey, keypairID) - - duration := 20 * time.Minute - d, ok := options["duration"] - if ok { - switch d := d.(type) { - case time.Duration: - duration = d - case string: - dur, err := time.ParseDuration(d) - if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) - } - duration = dur - } - } - - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil -} - -// S3BucketKeyer is any type that is capable of returning the S3 bucket key -// which should be cached by AWS CloudFront. 
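The interface declared just below is how this middleware stays decoupled from the S3 driver: instead of importing the concrete type, URLFor type-asserts for an optional capability at runtime and falls back when it is absent. A hedged sketch of that pattern; Storage, BucketKeyer, s3Like, and urlFor are invented names standing in for StorageDriver, S3BucketKeyer, the S3 driver, and the middleware's URLFor:

package main

import "fmt"

// Storage is a stand-in for the storagedriver.StorageDriver interface.
type Storage interface {
	URLFor(path string) (string, error)
}

// BucketKeyer mirrors S3BucketKeyer: an optional capability probed at runtime.
type BucketKeyer interface {
	BucketKey(path string) string
}

type s3Like struct{}

func (s3Like) URLFor(path string) (string, error) { return "https://origin.example" + path, nil }
func (s3Like) BucketKey(path string) string       { return "bucket" + path }

type plain struct{}

func (plain) URLFor(path string) (string, error) { return "https://origin.example" + path, nil }

// urlFor prefers a CDN-style URL when the backend advertises BucketKey and
// falls back to the driver's own URL otherwise.
func urlFor(d Storage, path string) (string, error) {
	if keyer, ok := d.(BucketKeyer); ok {
		return "https://cdn.example/" + keyer.BucketKey(path), nil
	}
	return d.URLFor(path)
}

func main() {
	u1, _ := urlFor(s3Like{}, "/blob")
	u2, _ := urlFor(plain{}, "/blob")
	fmt.Println(u1) // served via the CDN path
	fmt.Println(u2) // served directly by the backend
}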
-type S3BucketKeyer interface { - S3BucketKey(path string) string -} - -// Resolve returns an http.Handler which can serve the contents of the given -// Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - // TODO(endophage): currently only supports S3 - keyer, ok := lh.StorageDriver.(S3BucketKeyer) - if !ok { - context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(ctx, path, options) - } - - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) - if err != nil { - return "", err - } - return cfURL, nil -} - -// init registers the cloudfront layerHandler backend. -func init() { - storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go deleted file mode 100644 index 7e40a8dd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go +++ /dev/null @@ -1,39 +0,0 @@ -package storagemiddleware - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// InitFunc is the type of a StorageMiddleware factory function and is -// used to register the constructor for different StorageMiddleware backends. -type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) - -var storageMiddlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a StorageMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if storageMiddlewares == nil { - storageMiddlewares = make(map[string]InitFunc) - } - if _, exists := storageMiddlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - storageMiddlewares[name] = initFunc - - return nil -} - -// Get constructs a StorageMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { - if storageMiddlewares != nil { - if initFunc, exists := storageMiddlewares[name]; exists { - return initFunc(storageDriver, options) - } - } - - return nil, fmt.Errorf("no storage middleware registered with name: %s", name) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go deleted file mode 100644 index d1bc932f..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package oss implements the Aliyun OSS Storage driver backend. Support can be -// enabled by including the "include_oss" build tag. 
-package oss diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go deleted file mode 100644 index 67215bc2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go +++ /dev/null @@ -1,789 +0,0 @@ -// Package oss provides a storagedriver.StorageDriver implementation to -// store blobs in Aliyun OSS cloud storage. -// -// This package leverages the denverdino/aliyungo client library for interfacing with -// oss. -// -// Because OSS is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// +build include_oss - -package oss - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/distribution/context" - - "github.com/Sirupsen/logrus" - "github.com/denverdino/aliyungo/oss" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "oss" - -// minChunkSize defines the minimum multipart upload chunk size -// OSS API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize -const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk - -// listMax is the largest amount of objects you can request from OSS in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKeyID string - AccessKeySecret string - Bucket string - Region oss.Region - Internal bool - Encrypt bool - Secure bool - ChunkSize int64 - RootDirectory string - Endpoint string -} - -func init() { - factory.Register(driverName, &ossDriverFactory{}) -} - -// ossDriverFactory implements the factory.StorageDriverFactory interface -type ossDriverFactory struct{} - -func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Client *oss.Client - Bucket *oss.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS -// Objects are stored at absolute keys in the provided bucket. 
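FromParameters below normalizes a loosely typed map[string]interface{}: YAML configuration can surface the chunk size as a string, an int, or an int64, and all of them must collapse to one int64 with the 5MB multipart floor enforced. A self-contained sketch of that coercion; parseChunkSize is an invented helper name:

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

const minChunkSize = 5 << 20 // the 5MB multipart floor noted above

// parseChunkSize accepts a string or any common integer kind from the
// options map, then enforces the minimum, mirroring the switch below.
func parseChunkSize(raw interface{}) (int64, error) {
	chunkSize := int64(2 * minChunkSize) // default
	if raw == nil {
		return chunkSize, nil
	}
	switch v := raw.(type) {
	case string:
		vv, err := strconv.ParseInt(v, 0, 64)
		if err != nil {
			return 0, fmt.Errorf("chunksize must be an integer, %v invalid", raw)
		}
		chunkSize = vv
	case int64:
		chunkSize = v
	case int, uint, int32, uint32, uint64:
		chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
	default:
		return 0, fmt.Errorf("invalid value for chunksize: %#v", raw)
	}
	if chunkSize < minChunkSize {
		return 0, fmt.Errorf("chunksize %d must be at least %d", chunkSize, minChunkSize)
	}
	return chunkSize, nil
}

func main() {
	n, err := parseChunkSize("10485760")
	fmt.Println(n, err) // 10485760 <nil>
}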
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskeyid"] - if !ok { - return nil, fmt.Errorf("No accesskeyid parameter provided") - } - secretKey, ok := parameters["accesskeysecret"] - if !ok { - return nil, fmt.Errorf("No accesskeysecret parameter provided") - } - - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - internalBool := false - internal, ok := parameters["internal"] - if ok { - internalBool, ok = internal.(bool) - if !ok { - return nil, fmt.Errorf("The internal parameter should be a boolean") - } - } - - encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - } - - secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - endpoint, ok := parameters["endpoint"] - if !ok { - endpoint = "" - } - - params := DriverParameters{ - AccessKeyID: fmt.Sprint(accessKey), - AccessKeySecret: fmt.Sprint(secretKey), - Bucket: fmt.Sprint(bucket), - Region: oss.Region(fmt.Sprint(regionName)), - ChunkSize: chunkSize, - RootDirectory: fmt.Sprint(rootDirectory), - Encrypt: encryptBool, - Secure: secureBool, - Internal: internalBool, - Endpoint: fmt.Sprint(endpoint), - } - - return New(params) -} - -// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) - client.SetEndpoint(params.Endpoint) - bucket := client.Bucket(params.Bucket) - client.SetDebug(false) - - // Validate that the given credentials have at least read permissions in the - // given bucket scope. 
- if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { - return nil, err - } - - // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new OSS client while another one is running on the same bucket. - - d := &driver{ - Client: client, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.ossPath(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) - if err != nil { - return nil, parseError(path, err) - } - - // Due to Aliyun OSS API, status 200 and whole object will be return instead of an - // InvalidRange error when range is invalid. - // - // OSS sever will always return http.StatusPartialContent if range is acceptable. - if resp.StatusCode != http.StatusPartialContent { - resp.Body.Close() - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return resp.Body, nil -} - -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []oss.Part{} - var part oss.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
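The defer that follows is what delivers that guarantee: on every exit path the accumulated parts are either committed as one whole object or the multipart upload is aborted outright. A stripped-down sketch of the complete-or-abort shape; multiUpload and fakeMulti are hypothetical stand-ins for the OSS multipart handle:

package main

import "fmt"

// multiUpload abstracts the two terminal operations of a multipart upload.
type multiUpload interface {
	Complete(parts []int) error
	Abort() error
}

type fakeMulti struct{}

func (fakeMulti) Complete(parts []int) error { return fmt.Errorf("simulated failure") }
func (fakeMulti) Abort() error               { fmt.Println("aborted dangling upload"); return nil }

// upload shows the shape: whatever happens in the body, the deferred block
// either commits the parts as a whole or tears the upload down, so no
// dangling state survives (short of the process dying before the defer runs).
func upload(m multiUpload) (err error) {
	parts := []int{1, 2} // pretend two parts were streamed
	defer func() {
		if len(parts) == 0 {
			return
		}
		if cerr := m.Complete(parts); cerr != nil {
			m.Abort() // best effort: discard the partial object
			if err == nil {
				err = cerr
			}
		}
	}()
	// ... stream parts here ...
	return nil
}

func main() {
	fmt.Println(upload(fakeMulti{}))
}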
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the OSS - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying OSS library should handle it, it doesn't seem to - // be part of the shouldRetry function (see denverdino/aliyungo/oss). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part oss.Part - - part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout) - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. 
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.ossPath(path), nil) - if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - 
return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.ossPath(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && opath[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". - // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.ossPath("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) - - err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), - d.getContentType(), - getPermissions(), - oss.Options{}) - if err != nil { - logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
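Delete below works in pages: list up to listMax keys under the prefix, batch-delete them, and re-list until nothing remains. A toy sketch of that loop; list, delMulti, and deleteAll are invented stand-ins for the OSS bucket calls:

package main

import (
	"fmt"
	"sort"
	"strings"
)

const pageMax = 2 // tiny page size so the loop below actually paginates

// bucket is a fake object store: a set of keys.
var bucket = map[string]bool{"p/a": true, "p/b": true, "p/c": true, "q/x": true}

// list returns up to pageMax keys under prefix, in sorted order.
func list(prefix string) []string {
	var keys []string
	for k := range bucket {
		if strings.HasPrefix(k, prefix) {
			keys = append(keys, k)
		}
	}
	sort.Strings(keys)
	if len(keys) > pageMax {
		keys = keys[:pageMax]
	}
	return keys
}

// delMulti removes a batch of keys, like oss.Delete with multiple objects.
func delMulti(keys []string) {
	for _, k := range keys {
		delete(bucket, k)
	}
}

// deleteAll mirrors the loop below: list a page, delete it, re-list until
// the prefix is exhausted, which stays correct however many pages there are.
func deleteAll(prefix string) {
	for page := list(prefix); len(page) > 0; page = list(prefix) {
		delMulti(page)
	}
}

func main() {
	deleteAll("p/")
	fmt.Println(len(bucket)) // 1: only q/x remains
}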
-func (d *driver) Delete(ctx context.Context, path string) error { - listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - ossObjects := make([]oss.Object, listMax) - - for len(listResponse.Contents) > 0 { - for index, key := range listResponse.Contents { - ossObjects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) - if err != nil { - return nil - } - - listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) - testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) - logrus.Infof("testURL: %s", testURL) - return testURL, nil -} - -func (d *driver) ossPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -func parseError(path string, err error) error { - if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func hasCode(err error, code string) bool { - ossErr, ok := err.(*oss.Error) - return ok && ossErr.Code == code -} - -func (d *driver) getOptions() oss.Options { - return oss.Options{ServerSideEncryption: d.Encrypt} -} - -func getPermissions() oss.ACL { - return oss.Private -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) -} - -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go deleted file mode 100644 index 655c68a3..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rados implements the rados storage driver backend. Support can be -// enabled by including the "include_rados" build tag. 
-package rados diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go deleted file mode 100644 index c2be528e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go +++ /dev/null @@ -1,632 +0,0 @@ -// +build include_rados - -package rados - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "path" - "strconv" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/uuid" - "github.com/noahdesu/go-ceph/rados" -) - -const driverName = "rados" - -// Prefix all the stored blob -const objectBlobPrefix = "blob:" - -// Stripes objects size to 4M -const defaultChunkSize = 4 << 20 -const defaultXattrTotalSizeName = "total-size" - -// Max number of keys fetched from omap at each read operation -const defaultKeysFetched = 1 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - poolname string - username string - chunksize uint64 -} - -func init() { - factory.Register(driverName, &radosDriverFactory{}) -} - -// radosDriverFactory implements the factory.StorageDriverFactory interface -type radosDriverFactory struct{} - -func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn *rados.Conn - Ioctx *rados.IOContext - chunksize uint64 -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - poolname: the ceph pool name -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - - pool, ok := parameters["poolname"] - if !ok { - return nil, fmt.Errorf("No poolname parameter provided") - } - - username, ok := parameters["username"] - if !ok { - username = "" - } - - chunksize := uint64(defaultChunkSize) - chunksizeParam, ok := parameters["chunksize"] - if ok { - chunksize, ok = chunksizeParam.(uint64) - if !ok { - return nil, fmt.Errorf("The chunksize parameter should be a number") - } - } - - params := DriverParameters{ - fmt.Sprint(pool), - fmt.Sprint(username), - chunksize, - } - - return New(params) -} - -// New constructs a new Driver -func New(params DriverParameters) (*Driver, error) { - var conn *rados.Conn - var err error - - if params.username != "" { - log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) - conn, err = rados.NewConnWithUser(params.username) - } else { - log.Infof("Opening connection to pool %s", params.poolname) - conn, err = rados.NewConn() - } - - if err != nil { - return nil, err - } - - err = conn.ReadDefaultConfigFile() - if err != nil { - return nil, err - } - - err = conn.Connect() - if err != nil { - return nil, err - } - - log.Infof("Connected") - - ioctx, err := conn.OpenIOContext(params.poolname) - - log.Infof("Connected to pool %s", params.poolname) - - if err != nil { - return nil, err - } - - d := &driver{ - Ioctx: ioctx, - Conn: conn, - chunksize: params.chunksize, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
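Every operation in this driver leans on one piece of arithmetic, visible in the reader below: an absolute offset into a striped object maps to a numbered chunk object plus an offset inside that chunk. A quick restatement of that mapping (the driver's own helper, getChunkNameFromOffset, appears further down):

package main

import (
	"fmt"
	"strconv"
)

const chunkSize = uint64(4 << 20) // 4MB stripes, matching defaultChunkSize above

// chunkNameFromOffset maps an absolute offset to the chunk object
// "<oid>-<offset/chunkSize>" and the remainder offset within that chunk.
func chunkNameFromOffset(oid string, offset uint64) (string, uint64) {
	chunkID := offset / chunkSize
	return oid + "-" + strconv.FormatUint(chunkID, 10), offset % chunkSize
}

func main() {
	name, off := chunkNameFromOffset("blob:abc", 9<<20) // 9MB into the object
	fmt.Println(name, off)                              // blob:abc-2 1048576
}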
-type readStreamReader struct { - driver *driver - oid string - size uint64 - offset uint64 -} - -func (r *readStreamReader) Read(b []byte) (n int, err error) { - // Determine the part available to read - bufferOffset := uint64(0) - bufferSize := uint64(len(b)) - - // End of the object, read less than the buffer size - if bufferSize > r.size-r.offset { - bufferSize = r.size - r.offset - } - - // Fill `b` - for bufferOffset < bufferSize { - // Get the offset in the object chunk - chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset) - - // Determine the best size to read - bufferEndOffset := bufferSize - if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset { - bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset) - } - - // Read the chunk - n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset) - - if err != nil { - return int(bufferOffset), err - } - - bufferOffset += uint64(n) - r.offset += uint64(n) - } - - // EOF if the offset is at the end of the object - if r.offset == r.size { - return int(bufferOffset), io.EOF - } - - return int(bufferOffset), nil -} - -func (r *readStreamReader) Close() error { - return nil -} - -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // get object stat - stat, err := d.Stat(ctx, path) - - if err != nil { - return nil, err - } - - if offset > stat.Size() { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return &readStreamReader{ - driver: d, - oid: oid, - size: uint64(stat.Size()), - offset: uint64(offset), - }, nil -} - -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - buf := make([]byte, d.chunksize) - totalRead = 0 - - oid, err := d.getOid(path) - if err != nil { - switch err.(type) { - // Trying to write new object, generate new blob identifier for it - case storagedriver.PathNotFoundError: - oid = d.generateOid() - err = d.putOid(path, oid) - if err != nil { - return 0, err - } - default: - return 0, err - } - } else { - // Check total object size only for existing ones - totalSize, err := d.getXattrTotalSize(ctx, oid) - if err != nil { - return 0, err - } - - // If offset if after the current object size, fill the gap with zeros - for totalSize < uint64(offset) { - sizeToWrite := d.chunksize - if totalSize-uint64(offset) < sizeToWrite { - sizeToWrite = totalSize - uint64(offset) - } - - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize)) - err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset)) - if err != nil { - return totalRead, err - } - - totalSize += sizeToWrite - } - } - - // Writer - for { - // Align to chunk size - sizeRead := uint64(0) - sizeToRead := uint64(offset+totalRead) % d.chunksize - if sizeToRead == 0 { - sizeToRead = d.chunksize - } - - // Read from `reader` - for sizeRead < sizeToRead { - nn, err := reader.Read(buf[sizeRead:sizeToRead]) - sizeRead += uint64(nn) - - if err != nil { - if err != io.EOF { - return totalRead, err - } - - break - } - } - - // End of file and nothing was read - if sizeRead == 0 { - break - } - - // Write chunk object - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead)) - err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset)) - - if err != nil { - return totalRead, err - } 
- - // Update total object size as xattr in the first chunk of the object - err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) - if err != nil { - return totalRead, err - } - - totalRead += int64(sizeRead) - - // End of file - if sizeRead < sizeToRead { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // the path is a virtual directory? - if oid == "" { - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: 0, - IsDir: true, - }, - }, nil - } - - // stat first chunk - stat, err := d.Ioctx.Stat(oid + "-0") - - if err != nil { - return nil, err - } - - // get total size of chunked object - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(totalSize), - ModTime: stat.ModTime, - }, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { - files, err := d.listDirectoryOid(dirPath) - - if err != nil { - return nil, storagedriver.PathNotFoundError{Path: dirPath} - } - - keys := make([]string, 0, len(files)) - for k := range files { - if k != dirPath { - keys = append(keys, path.Join(dirPath, k)) - } - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - // Get oid - oid, err := d.getOid(sourcePath) - - if err != nil { - return err - } - - // Move reference - err = d.putOid(destPath, oid) - - if err != nil { - return err - } - - // Delete old reference - err = d.deleteOid(sourcePath) - - if err != nil { - return err - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, objectPath string) error { - // Get oid - oid, err := d.getOid(objectPath) - - if err != nil { - return err - } - - // Deleting virtual directory - if oid == "" { - objects, err := d.listDirectoryOid(objectPath) - if err != nil { - return err - } - - for object := range objects { - err = d.Delete(ctx, path.Join(objectPath, object)) - if err != nil { - return err - } - } - } else { - // Delete object chunks - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return err - } - - for offset := uint64(0); offset < totalSize; offset += d.chunksize { - chunkName, _ := d.getChunkNameFromOffset(oid, offset) - - err = d.Ioctx.Delete(chunkName) - if err != nil { - return err - } - } - - // Delete reference - err = d.deleteOid(objectPath) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
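The helpers further below (putOid, getOid, deleteOid) emulate a directory hierarchy on top of flat RADOS objects: each directory is an object whose omap maps entry names to blob identifiers, and adding an entry recursively references every parent. A toy sketch of that recursive referencing, with ordinary Go maps standing in for omaps; the early-stop shortcut is my reading of the driver's "skip the parents referencing" logic:

package main

import (
	"fmt"
	"path"
)

// omaps stands in for the per-object key/value maps (Ceph omaps): one map
// per directory object, keyed by entry name.
var omaps = map[string]map[string]string{}

// putOid records oid under objectPath and makes sure every parent directory
// is itself referenced from its parent, mirroring the driver's recursion.
func putOid(objectPath, oid string) {
	dir, base := path.Split(objectPath)
	dir = path.Clean(dir)
	if omaps[dir] == nil {
		omaps[dir] = map[string]string{}
	}
	// If the directory already held entries, its own parents must already be
	// referenced, so the upward walk can stop (the same shortcut the driver
	// takes when creating a reference with an empty oid).
	skipParents := oid == "" && len(omaps[dir]) > 0
	omaps[dir][base] = oid
	if !skipParents && dir != "/" {
		putOid(dir, "") // an empty oid marks a virtual directory entry
	}
}

func main() {
	putOid("/v2/blobs/sha256/aa", "blob:1234")
	fmt.Println(omaps["/v2/blobs/sha256"]) // map[aa:blob:1234]
	fmt.Println(omaps["/v2"])              // map[blobs:]
}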
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} - -// Generate a blob identifier -func (d *driver) generateOid() string { - return objectBlobPrefix + uuid.Generate().String() -} - -// Reference a object and its hierarchy -func (d *driver) putOid(objectPath string, oid string) error { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - createParentReference := true - - // After creating this reference, skip the parents referencing since the - // hierarchy already exists - if oid == "" { - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - if (err == nil) && (len(firstReference) > 0) { - createParentReference = false - } - } - - oids := map[string][]byte{ - base: []byte(oid), - } - - // Reference object - err := d.Ioctx.SetOmap(directory, oids) - if err != nil { - return err - } - - // Esure parent virtual directories - if createParentReference { - return d.putOid(directory, "") - } - - return nil -} - -// Get the object identifier from an object name -func (d *driver) getOid(objectPath string) (string, error) { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - - files, err := d.Ioctx.GetOmapValues(directory, "", base, 1) - - if (err != nil) || (files[base] == nil) { - return "", storagedriver.PathNotFoundError{Path: objectPath} - } - - return string(files[base]), nil -} - -// List the objects of a virtual directory -func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) { - return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched) -} - -// Remove a file from the files hierarchy -func (d *driver) deleteOid(objectPath string) error { - // Remove object reference - directory := path.Dir(objectPath) - base := path.Base(objectPath) - err := d.Ioctx.RmOmapKeys(directory, []string{base}) - - if err != nil { - return err - } - - // Remove virtual directory if empty (no more references) - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - - if err != nil { - return err - } - - if len(firstReference) == 0 { - // Delete omap - err := d.Ioctx.Delete(directory) - - if err != nil { - return err - } - - // Remove reference on parent omaps - if directory != "" { - return d.deleteOid(directory) - } - } - - return nil -} - -// Takes an offset in an chunked object and return the chunk name and a new -// offset in this chunk object -func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) { - chunkID := offset / d.chunksize - chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10) - chunkedOffset := offset % d.chunksize - return chunkedOid, chunkedOffset -} - -// Set the total size of a chunked object `oid` -func (d *driver) setXattrTotalSize(oid string, size uint64) error { - // Convert uint64 `size` to []byte - xattr := make([]byte, binary.MaxVarintLen64) - binary.LittleEndian.PutUint64(xattr, size) - - // Save the total size as a xattr in the first chunk - return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) -} - -// Get the total size of the chunked object `oid` stored as xattr -func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) { - // Fetch xattr as []byte - xattr := make([]byte, binary.MaxVarintLen64) - xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) - - if err != nil { - return 0, err - } - - if xattrLength != len(xattr) { - context.GetLogger(ctx).Errorf("object %s 
xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) - return 0, storagedriver.PathNotFoundError{Path: oid} - } - - // Convert []byte as uint64 - totalSize := binary.LittleEndian.Uint64(xattr) - - return totalSize, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go deleted file mode 100644 index 7bb23a85..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go +++ /dev/null @@ -1,829 +0,0 @@ -// Package s3 provides a storagedriver.StorageDriver implementation to -// store blobs in Amazon S3 cloud storage. -// -// This package leverages the AdRoll/goamz client library for interfacing with -// s3. -// -// Because s3 is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that s3 guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. -package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
-// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3. -// Objects are stored at absolute keys in the provided bucket. -type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM role on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskey"] - if !ok { - accessKey = "" - } - secretKey, ok := parameters["secretkey"] - if !ok { - secretKey = "" - } - - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := aws.GetRegion(fmt.Sprint(regionName)) - if region.Name == "" { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - } - - secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - } - - v4AuthBool := false - v4Auth, ok := parameters["v4auth"] - if ok { - v4AuthBool, ok = v4Auth.(bool) - if !ok { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - encryptBool, - secureBool, - v4AuthBool, - chunkSize, - fmt.Sprint(rootDirectory), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) - if err != nil { - return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) - } - - if !params.Secure { - params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) - } - - s3obj := s3.New(auth, params.Region) - bucket := s3obj.Bucket(params.Bucket) - - if params.V4Auth { - s3obj.Signature = aws.V4Signature - } else { - if params.Region.Name == "eu-central-1" { - return nil, fmt.Errorf("The eu-central-1 region only works with v4 
authentication") - } - } - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.s3Path(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) - if err != nil { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []s3.Part{} - var part s3.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - // The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are a few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the s3 - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying s3 library should handle it, it doesn't seem to - // be part of the shouldRetry function (see AdRoll/goamz/s3). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part s3.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the s3 package does not. We may add s3 - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only be triggered when a tcp - // connection to s3 slows to a crawl. If the RequestTimeout - // ends up getting added to the s3 library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *s3.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. - default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially.
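The retry policy buried in that goroutine is easier to audit on its own. A distilled sketch of the same idea, under the same constants the code above uses (five attempts, linear backoff, retrying only on S3's RequestTimeout code):

```go
package retry

import "time"

// putWithRetry distills the loop above: up to five attempts with linear
// backoff (100ms, 200ms, ...), retrying only when isRequestTimeout reports
// s3's retryable "RequestTimeout" error; anything else aborts immediately.
func putWithRetry(put func() error, isRequestTimeout func(error) bool) error {
	var err error
	for retries := 0; retries < 5; retries++ {
		if err = put(); err == nil {
			return nil
		}
		if !isRequestTimeout(err) {
			return err
		}
		time.Sleep(100 * time.Millisecond * time.Duration(retries+1))
	}
	return err
}
```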
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.s3Path(path), nil) - if err != nil { - if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". - // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), - s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
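List above leans on S3's delimiter listing: keys under the prefix become files, CommonPrefixes become virtual directories, and the driver's root prefix is swapped back for "/". A stripped-down sketch of that translation, with the rootPrefix handling simplified to the plain case (pure string handling, no S3 calls):

```go
package main

import (
	"fmt"
	"strings"
)

// splitListing reproduces the translation List performs above: object keys
// map to file paths, common prefixes (minus their trailing slash) map to
// directory paths, with the driver's root prefix replaced by "/".
func splitListing(keys, commonPrefixes []string, rootPrefix string) (files, dirs []string) {
	for _, k := range keys {
		files = append(files, strings.Replace(k, rootPrefix, "/", 1))
	}
	for _, p := range commonPrefixes {
		dirs = append(dirs, strings.Replace(strings.TrimSuffix(p, "/"), rootPrefix, "/", 1))
	}
	return files, dirs
}

func main() {
	files, dirs := splitListing(
		[]string{"registry/docker/a"},
		[]string{"registry/docker/sub/"},
		"registry/",
	)
	fmt.Println(files, dirs) // [/docker/a] [/docker/sub]
}
```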
-func (d *driver) Delete(ctx context.Context, path string) error { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - s3Objects := make([]s3.Object, listMax) - - for len(listResponse.Contents) > 0 { - for index, key := range listResponse.Contents { - s3Objects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]}) - if err != nil { - return err - } - - listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an ErrUnsupportedMethod in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil -} - -func (d *driver) s3Path(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the s3 bucket key for the given storage driver path. -func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).s3Path(path) -} - -func parseError(path string, err error) error { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*aws.Error) - return ok && s3err.Code == code -} - -func (d *driver) getOptions() s3.Options { - return s3.Options{SSE: d.Encrypt} -} - -func getPermissions() s3.ACL { - return s3.Private -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) -} - -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) -}
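The key-mapping rule in the s3Path helper above is compact enough to verify by eye; a standalone restatement with two representative cases:

```go
package main

import (
	"fmt"
	"strings"
)

// s3Key restates s3Path above: join the configured root directory and the
// driver path, then strip the leading slash to get a bucket-relative key.
func s3Key(rootDirectory, path string) string {
	return strings.TrimLeft(strings.TrimRight(rootDirectory, "/")+path, "/")
}

func main() {
	fmt.Println(s3Key("/registry/", "/docker/repo")) // registry/docker/repo
	fmt.Println(s3Key("", "/docker/repo"))           // docker/repo
}
```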
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go deleted file mode 100644 index dc8bdc8d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go +++ /dev/null @@ -1,144 +0,0 @@ -package driver - -import ( - "fmt" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/context" -) - -// Version is a string representing the storage driver version, of the form -// Major.Minor. -// The registry must accept storage drivers with equal major version and greater -// minor version, but may not be compatible with older storage driver versions. -type Version string - -// Major returns the major (primary) component of a version. -func (version Version) Major() uint { - majorPart := strings.Split(string(version), ".")[0] - major, _ := strconv.ParseUint(majorPart, 10, 0) - return uint(major) -} - -// Minor returns the minor (secondary) component of a version. -func (version Version) Minor() uint { - minorPart := strings.Split(string(version), ".")[1] - minor, _ := strconv.ParseUint(minorPart, 10, 0) - return uint(minor) -} - -// CurrentVersion is the current storage driver Version. -const CurrentVersion Version = "0.1" - -// StorageDriver defines methods that a Storage Driver must implement for a -// filesystem-like key/value object storage. -type StorageDriver interface { - // Name returns the human-readable "name" of the driver, useful in error - // messages and logging. By convention, this will just be the registration - // name, but drivers may provide other information here. - Name() string - - // GetContent retrieves the content stored at "path" as a []byte. - // This should primarily be used for small objects. - GetContent(ctx context.Context, path string) ([]byte, error) - - // PutContent stores the []byte content at a location designated by "path". - // This should primarily be used for small objects. - PutContent(ctx context.Context, path string, content []byte) error - - // ReadStream retrieves an io.ReadCloser for the content stored at "path" - // with a given byte offset. - // May be used to resume reading a stream by providing a nonzero offset. - ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) - - // WriteStream stores the contents of the provided io.Reader at a - // location designated by the given path. - // May be used to resume writing a stream by providing a nonzero offset. - // The offset must be no larger than the CurrentSize for this path. - WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) - - // Stat retrieves the FileInfo for the given path, including the current - // size in bytes and the creation time. - Stat(ctx context.Context, path string) (FileInfo, error) - - // List returns a list of the objects that are direct descendants of the - // given path. - List(ctx context.Context, path string) ([]string, error) - - // Move moves an object stored at sourcePath to destPath, removing the - // original object. - // Note: This may be no more efficient than a copy followed by a delete for - // many implementations. - Move(ctx context.Context, sourcePath string, destPath string) error - - // Delete recursively deletes all objects stored at "path" and its subpaths. - Delete(ctx context.Context, path string) error - - // URLFor returns a URL which may be used to retrieve the content stored at - // the given path, possibly using the given options. - // May return an ErrUnsupportedMethod in certain StorageDriver - // implementations. - URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) -} - -// PathRegexp is the regular expression which each file path must match. A -// file path is absolute, beginning with a slash and containing a positive -// number of path components separated by slashes, where each component is -// restricted to alphanumeric characters or a period, underscore, or -// hyphen. -var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
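PathRegexp is the contract every caller-supplied path must satisfy; a quick check against a few candidates (the same shapes the test suite later in this diff exercises):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same expression as PathRegexp above: one or more slash-led components of
// alphanumerics, dots, underscores, and hyphens, with no trailing slash.
var pathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)

func main() {
	for _, p := range []string{"/docker/registry", "/a--b", "/abc_123/", "//bcd"} {
		fmt.Printf("%-18s %v\n", p, pathRegexp.MatchString(p))
	}
	// /docker/registry true, /a--b true, /abc_123/ false, //bcd false
}
```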
-// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. -type ErrUnsupportedMethod struct { - DriverName string -} - -func (err ErrUnsupportedMethod) Error() string { - return fmt.Sprintf("%s: unsupported method", err.DriverName) -} - -// PathNotFoundError is returned when operating on a nonexistent path. -type PathNotFoundError struct { - Path string - DriverName string -} - -func (err PathNotFoundError) Error() string { - return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) -} - -// InvalidPathError is returned when the provided path is malformed. -type InvalidPathError struct { - Path string - DriverName string -} - -func (err InvalidPathError) Error() string { - return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) -} - -// InvalidOffsetError is returned when attempting to read or write from an -// invalid offset. -type InvalidOffsetError struct { - Path string - Offset int64 - DriverName string -} - -func (err InvalidOffsetError) Error() string { - return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) -} - -// Error is a catch-all error type which captures an error string and -// the driver type on which it occurred. -type Error struct { - DriverName string - Enclosed error -} - -func (err Error) Error() string { - return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) -}
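Callers are expected to dispatch on these concrete error types rather than match on error strings. A caller-side sketch, written as if inside this package so the types need no qualifier; the exists helper is hypothetical, not part of the interface:

```go
// exists is a hypothetical helper showing the intended consumption of the
// typed errors above: PathNotFoundError means "no such path"; anything else
// is a genuine driver failure that must be propagated.
func exists(ctx context.Context, d StorageDriver, path string) (bool, error) {
	_, err := d.Stat(ctx, path)
	switch err.(type) {
	case nil:
		return true, nil
	case PathNotFoundError:
		return false, nil
	default:
		return false, err
	}
}
```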
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go deleted file mode 100644 index 86bce794..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go +++ /dev/null @@ -1,812 +0,0 @@ -// Package swift provides a storagedriver.StorageDriver implementation to -// store blobs in Openstack Swift object storage. -// -// This package leverages the ncw/swift client library for interfacing with -// Swift. -// -// It supports both TempAuth authentication and Keystone authentication -// (up to version 3). -// -// As Swift has a limit on the size of a single uploaded object (by default -// this is 5GB), the driver makes use of the Swift Large Object Support -// (http://docs.openstack.org/developer/swift/overview_large_objects.html). -// Only one container is used for both manifests and data objects. Manifests -// are stored in the 'files' pseudo directory, data objects are stored under -// 'segments'. -package swift - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "crypto/tls" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/mitchellh/mapstructure" - "github.com/ncw/swift" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/version" -) - -const driverName = "swift" - -// defaultChunkSize defines the default size of a segment -const defaultChunkSize = 20 * 1024 * 1024 - -// minChunkSize defines the minimum size of a segment -const minChunkSize = 1 << 20 - -// readAfterWriteTimeout defines how long we wait for an object to appear after it has been uploaded -var readAfterWriteTimeout = 15 * time.Second - -// readAfterWriteWait defines the time to sleep between two retries -var readAfterWriteWait = 200 * time.Millisecond - -// Parameters encapsulates all of the driver parameters after all values have been set -type Parameters struct { - Username string - Password string - AuthURL string - Tenant string - TenantID string - Domain string - DomainID string - TrustID string - Region string - Container string - Prefix string - InsecureSkipVerify bool - ChunkSize int - SecretKey string - AccessKey string - TempURLContainerKey bool - TempURLMethods []string -} - -// swiftInfo maps the JSON structure returned by the Swift /info endpoint -type swiftInfo struct { - Swift struct { - Version string `mapstructure:"version"` - } - Tempurl struct { - Methods []string `mapstructure:"methods"` - } -} - -func init() { - factory.Register(driverName, &swiftDriverFactory{}) -} - -// swiftDriverFactory implements the factory.StorageDriverFactory interface -type swiftDriverFactory struct{} - -func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn swift.Connection - Container string - Prefix string - BulkDeleteSupport bool - ChunkSize int - SecretKey string - AccessKey string - TempURLContainerKey bool - TempURLMethods []string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift. -// Objects are stored at absolute keys in the provided container.
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - username -// - password -// - authurl -// - container -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - params := Parameters{ - ChunkSize: defaultChunkSize, - InsecureSkipVerify: false, - } - - if err := mapstructure.Decode(parameters, ¶ms); err != nil { - return nil, err - } - - if params.Username == "" { - return nil, fmt.Errorf("No username parameter provided") - } - - if params.Password == "" { - return nil, fmt.Errorf("No password parameter provided") - } - - if params.AuthURL == "" { - return nil, fmt.Errorf("No authurl parameter provided") - } - - if params.Container == "" { - return nil, fmt.Errorf("No container parameter provided") - } - - if params.ChunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) - } - - return New(params) -} - -// New constructs a new Driver with the given Openstack Swift credentials and container name -func New(params Parameters) (*Driver, error) { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, - } - - ct := swift.Connection{ - UserName: params.Username, - ApiKey: params.Password, - AuthUrl: params.AuthURL, - Region: params.Region, - UserAgent: "distribution/" + version.Version, - Tenant: params.Tenant, - TenantId: params.TenantID, - Domain: params.Domain, - DomainId: params.DomainID, - TrustId: params.TrustID, - Transport: transport, - ConnectTimeout: 60 * time.Second, - Timeout: 15 * 60 * time.Second, - } - err := ct.Authenticate() - if err != nil { - return nil, fmt.Errorf("Swift authentication failed: %s", err) - } - - if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { - if err := ct.ContainerCreate(params.Container, nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) - } - } else if err != nil { - return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) - } - - d := &driver{ - Conn: ct, - Container: params.Container, - Prefix: params.Prefix, - ChunkSize: params.ChunkSize, - TempURLMethods: make([]string, 0), - AccessKey: params.AccessKey, - } - - info := swiftInfo{} - if config, err := d.Conn.QueryInfo(); err == nil { - _, d.BulkDeleteSupport = config["bulk_delete"] - - if err := mapstructure.Decode(config, &info); err == nil { - d.TempURLContainerKey = info.Swift.Version >= "2.3.0" - d.TempURLMethods = info.Tempurl.Methods - } - } else { - d.TempURLContainerKey = params.TempURLContainerKey - d.TempURLMethods = params.TempURLMethods - } - - if len(d.TempURLMethods) > 0 { - secretKey := params.SecretKey - if secretKey == "" { - secretKey, _ = generateSecret() - } - - // Since Swift 2.2.2, we can now set secret keys on containers - // in addition to the account secret keys. Use them in preference. 
- if d.TempURLContainerKey { - _, containerHeaders, err := d.Conn.Container(d.Container) - if err != nil { - return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) - } - - d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] - if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { - m := swift.Metadata{} - m["temp-url-key"] = secretKey - if err := d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { - d.SecretKey = secretKey - } - } - } else { - // Use the account secret key - _, accountHeaders, err := d.Conn.Account() - if err != nil { - return nil, fmt.Errorf("Failed to fetch account info (%s)", err) - } - - d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] - if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { - m := swift.Metadata{} - m["temp-url-key"] = secretKey - if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { - d.SecretKey = secretKey - } - } - } - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return content, err -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" - - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - return file, err -}
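Stepping back to the temp-url-key material above: the signed URLs that URLFor later hands out follow the standard OpenStack temp URL scheme, which ncw/swift's ObjectTempUrl implements internally. For illustration only (this helper is not part of the driver), the signature is an HMAC-SHA1 over the method, expiry, and object path, keyed with the account or container secret configured above:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// tempURLSignature sketches the OpenStack temp URL signature:
// HMAC-SHA1(key, "METHOD\n<expires>\n<path>"), hex-encoded.
func tempURLSignature(key, method, objectPath string, expiresUnix int64) string {
	mac := hmac.New(sha1.New, []byte(key))
	fmt.Fprintf(mac, "%s\n%d\n%s", method, expiresUnix, objectPath)
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	// Hypothetical key, account, container, and object.
	fmt.Println(tempURLSignature("mykey", "GET", "/v1/AUTH_acct/container/object", 1457305200))
}
```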
-// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { - var ( - segments []swift.Object - multi io.Reader - paddingReader io.Reader - currentLength int64 - cursor int64 - segmentPath string - ) - - partNumber := 1 - chunkSize := int64(d.ChunkSize) - zeroBuf := make([]byte, d.ChunkSize) - hash := md5.New() - - getSegment := func() string { - return fmt.Sprintf("%s/%016d", segmentPath, partNumber) - } - - max := func(a int64, b int64) int64 { - if a > b { - return a - } - return b - } - - createManifest := true - info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - manifest, ok := headers["X-Object-Manifest"] - if !ok { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err - } - if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { - return 0, err - } - segments = append(segments, info) - } else { - _, segmentPath = parseManifest(manifest) - if segments, err = d.getAllSegments(segmentPath); err != nil { - return 0, err - } - createManifest = false - } - currentLength = info.Bytes - } else if err == swift.ObjectNotFound { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err - } - } else { - return 0, err - } - - // First, we skip the existing segments that are not modified by this call - for i := range segments { - if offset < cursor+segments[i].Bytes { - break - } - cursor += segments[i].Bytes - hash.Write([]byte(segments[i].Hash)) - partNumber++ - } - - // We reached the end of the file but we haven't reached 'offset' yet - // Therefore we add blocks of zeros - if offset >= currentLength { - for offset-currentLength >= chunkSize { - // Insert a block of zeros - headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) - if err != nil { - if err == swift.ObjectNotFound { - return 0, storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err - } - currentLength += chunkSize - partNumber++ - hash.Write([]byte(headers["Etag"])) - } - - cursor = currentLength - paddingReader = bytes.NewReader(zeroBuf) - } else if offset-cursor > 0 { - // Offset is inside the current segment: we need to read the - // data from the beginning of the segment to offset - file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) - if err != nil { - if err == swift.ObjectNotFound { - return 0, storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err - } - defer file.Close() - paddingReader = file - } - - readers := []io.Reader{} - if paddingReader != nil { - readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) - } - readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) - multi = io.MultiReader(readers...)
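One detail of getSegment above is worth isolating: segment names embed a zero-padded part number, so their lexical order equals upload order, which is how Swift's large object support concatenates segments when the manifest is served. A tiny demonstration:

```go
package main

import "fmt"

func main() {
	// Same "%s/%016d" format as getSegment above; segment path is made up.
	for _, part := range []int{1, 2, 10} {
		fmt.Printf("%s/%016d\n", "segments/3ab/c0ffee", part)
	}
	// segments/3ab/c0ffee/0000000000000001
	// segments/3ab/c0ffee/0000000000000002
	// segments/3ab/c0ffee/0000000000000010
}
```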
- - writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { - currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} - } - return false, bytesRead, err - } - - segmentHash := md5.New() - writer := io.MultiWriter(currentSegment, segmentHash) - - n, err := io.Copy(writer, multi) - if err != nil { - return false, bytesRead, err - } - - if n > 0 { - defer func() { - closeError := currentSegment.Close() - if err == nil { - err = closeError - } - hexHash := hex.EncodeToString(segmentHash.Sum(nil)) - hash.Write([]byte(hexHash)) - }() - bytesRead += n - max(0, offset-cursor) - } - - if n < chunkSize { - // We wrote all the data - if cursor+n < currentLength { - // Copy the end of the chunk - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - _, copyErr := io.Copy(writer, file) - - if err := file.Close(); err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - if copyErr != nil { - return false, bytesRead, copyErr - } - } - - return true, bytesRead, nil - } - - multi = io.LimitReader(reader, chunkSize) - cursor += chunkSize - partNumber++ - - return false, bytesRead, nil - } - - finished := false - read := int64(0) - bytesRead := int64(0) - for !finished { - finished, read, err = writeSegment(getSegment()) - bytesRead += read - if err != nil { - return bytesRead, err - } - } - - for ; partNumber < len(segments); partNumber++ { - hash.Write([]byte(segments[partNumber].Hash)) - } - - if createManifest { - if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { - return 0, err - } - } - - expectedHash := hex.EncodeToString(hash.Sum(nil)) - waitingTime := readAfterWriteWait - endTime := time.Now().Add(readAfterWriteTimeout) - for { - var infos swift.Object - if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { - if strings.Trim(infos.Hash, "\"") == expectedHash { - return bytesRead, nil - } - err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) - } - if time.Now().Add(waitingTime).After(endTime) { - break - } - time.Sleep(waitingTime) - waitingTime *= 2 - } - - return bytesRead, err -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time.
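The read-after-write wait at the end of WriteStream above compensates for Swift's eventual consistency: it polls the object's ETag with a doubling delay until the expected hash shows up or the deadline passes. The same control flow, distilled:

```go
package swiftpoll

import "time"

// pollUntil mirrors the wait loop above: re-run check with a doubling wait
// (the analogue of readAfterWriteWait) until it reports success or the next
// sleep would pass the deadline (the analogue of readAfterWriteTimeout).
func pollUntil(check func() bool, wait, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		if check() {
			return true
		}
		if time.Now().Add(wait).After(deadline) {
			return false
		}
		time.Sleep(wait)
		wait *= 2
	}
}
```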
-func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - swiftPath := d.swiftPath(path) - opts := &swift.ObjectsOpts{ - Prefix: swiftPath, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - if err != nil { - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), - } - - for _, obj := range objects { - if obj.PseudoDirectory && obj.Name == swiftPath+"/" { - fi.IsDir = true - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } else if obj.Name == swiftPath { - // On Swift 1.12, the 'bytes' field is always 0 - // so we need to do a second HEAD request - info, _, err := d.Conn.Object(d.Container, swiftPath) - if err != nil { - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - fi.IsDir = false - fi.Size = info.Bytes - fi.ModTime = info.LastModified - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } - } - - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - var files []string - - prefix := d.swiftPath(path) - if prefix != "" { - prefix += "/" - } - - opts := &swift.ObjectsOpts{ - Prefix: prefix, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - for _, obj := range objects { - files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) - } - - if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { - return files, storagedriver.PathNotFoundError{Path: path} - } - return files, err -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) - if err == nil { - if manifest, ok := headers["X-Object-Manifest"]; ok { - if err = d.createManifest(destPath, manifest); err != nil { - return err - } - err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) - } else { - err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) - } - } - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - opts := swift.ObjectsOpts{ - Prefix: d.swiftPath(path) + "/", - } - - objects, err := d.Conn.ObjectsAll(d.Container, &opts) - if err != nil { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - - for _, obj := range objects { - if obj.PseudoDirectory { - continue - } - if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { - manifest, ok := headers["X-Object-Manifest"] - if ok { - _, prefix := parseManifest(manifest) - segments, err := d.getAllSegments(prefix) - if err != nil { - return err - } - objects = append(objects, segments...) 
- } - } else { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - } - - if d.BulkDeleteSupport && len(objects) > 0 { - filenames := make([]string, len(objects)) - for i, obj := range objects { - filenames[i] = obj.Name - } - _, err = d.Conn.BulkDelete(d.Container, filenames) - // Don't fail on ObjectNotFound because eventual consistency - // makes this situation normal. - if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } else { - for _, obj := range objects { - if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - } - } - - _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } else if err == swift.ObjectNotFound { - if len(objects) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - } else { - return err - } - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - if d.SecretKey == "" { - return "", storagedriver.ErrUnsupportedMethod{} - } - - methodString := "GET" - method, ok := options["method"] - if ok { - if methodString, ok = method.(string); !ok { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - if methodString == "HEAD" { - // A "HEAD" request on a temporary URL is allowed if the - // signature was generated with "GET", "POST" or "PUT" - methodString = "GET" - } - - supported := false - for _, method := range d.TempURLMethods { - if method == methodString { - supported = true - break - } - } - - if !supported { - return "", storagedriver.ErrUnsupportedMethod{} - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) - - if d.AccessKey != "" { - // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature - url, _ := url.Parse(tempURL) - query := url.Query() - query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) - url.RawQuery = query.Encode() - tempURL = url.String() - } - - return tempURL, nil -} - -func (d *driver) swiftPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") -} - -func (d *driver) swiftSegmentPath(path string) (string, error) { - checksum := sha1.New() - random := make([]byte, 32) - if _, err := rand.Read(random); err != nil { - return "", err - } - path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) - if err == 
swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return segments, err -} - -func (d *driver) createManifest(path string, segments string) error { - headers := make(swift.Headers) - headers["X-Object-Manifest"] = segments - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) - if err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - if err := manifest.Close(); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - return nil -} - -func parseManifest(manifest string) (container string, prefix string) { - components := strings.SplitN(manifest, "/", 2) - container = components[0] - if len(components) > 1 { - prefix = components[1] - } - return container, prefix -} - -func generateSecret() (string, error) { - var secretBytes [32]byte - if _, err := rand.Read(secretBytes[:]); err != nil { - return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) - } - return hex.EncodeToString(secretBytes[:]), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go deleted file mode 100644 index 5c34cca6..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go +++ /dev/null @@ -1,1222 +0,0 @@ -package testsuites - -import ( - "bytes" - "crypto/sha1" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" -) - -// Test hooks up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -// RegisterSuite registers an in-process storage driver test suite with -// the go test runner. -func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { - check.Suite(&DriverSuite{ - Constructor: driverConstructor, - SkipCheck: skipCheck, - ctx: context.Background(), - }) -} - -// SkipCheck is a function used to determine if a test suite should be skipped. -// If a SkipCheck returns a non-empty skip reason, the suite is skipped with -// the given reason. -type SkipCheck func() (reason string) - -// NeverSkip is a default SkipCheck which never skips the suite. -var NeverSkip SkipCheck = func() string { return "" } - -// DriverConstructor is a function which returns a new -// storagedriver.StorageDriver. -type DriverConstructor func() (storagedriver.StorageDriver, error) - -// DriverTeardown is a function which cleans up a suite's -// storagedriver.StorageDriver. -type DriverTeardown func() error - -// DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. The intended way to create a DriverSuite is -// with RegisterSuite. -type DriverSuite struct { - Constructor DriverConstructor - Teardown DriverTeardown - SkipCheck - storagedriver.StorageDriver - ctx context.Context -} - -// SetUpSuite sets up the gocheck test suite. 
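For context on how RegisterSuite above is consumed: each driver package wires itself into this suite from its own test file. A sketch modeled on the in-memory driver's registration (the constructor name is assumed, not shown in this diff):

```go
package inmemory_test

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	"github.com/docker/distribution/registry/storage/driver/testsuites"
)

// init hands the suite a DriverConstructor and a SkipCheck; gocheck then
// runs every Test* method in DriverSuite against this driver.
func init() {
	testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
		return inmemory.New(), nil // assumed constructor for the in-memory driver
	}, testsuites.NeverSkip)
}
```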
-func (suite *DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } - d, err := suite.Constructor() - c.Assert(err, check.IsNil) - suite.StorageDriver = d -} - -// TearDownSuite tears down the gocheck test suite. -func (suite *DriverSuite) TearDownSuite(c *check.C) { - if suite.Teardown != nil { - err := suite.Teardown() - c.Assert(err, check.IsNil) - } -} - -// TearDownTest tears down the gocheck test. -// This causes the suite to abort if any files are left around in the storage -// driver. -func (suite *DriverSuite) TearDownTest(c *check.C) { - files, _ := suite.StorageDriver.List(suite.ctx, "/") - if len(files) > 0 { - c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) - } -} - -// TestRootExists ensures that all storage drivers have a root path by default. -func (suite *DriverSuite) TestRootExists(c *check.C) { - _, err := suite.StorageDriver.List(suite.ctx, "/") - if err != nil { - c.Fatalf(`the root path "/" should always exist: %v`, err) - } -} - -// TestValidPaths checks that various valid file paths are accepted by the -// storage driver. -func (suite *DriverSuite) TestValidPaths(c *check.C) { - contents := randomContents(64) - validFiles := []string{ - "/a", - "/2", - "/aa", - "/a.a", - "/0-9/abcdefg", - "/abcdefg/z.75", - "/abc/1.2.3.4.5-6_zyx/123.z/4", - "/docker/docker-registry", - "/123.abc", - "/abc./abc", - "/.abc", - "/a--b", - "/a-.b", - "/_.abc", - "/Docker/docker-registry", - "/Abc/Cba"} - - for _, filename := range validFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.deletePath(c, firstPart(filename)) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - } -} - -func (suite *DriverSuite) deletePath(c *check.C, path string) { - for tries := 2; tries > 0; tries-- { - err := suite.StorageDriver.Delete(suite.ctx, path) - if _, ok := err.(storagedriver.PathNotFoundError); ok { - err = nil - } - c.Assert(err, check.IsNil) - paths, err := suite.StorageDriver.List(suite.ctx, path) - if len(paths) == 0 { - break - } - time.Sleep(time.Second * 2) - } -} - -// TestInvalidPaths checks that various invalid file paths are rejected by the -// storage driver. -func (suite *DriverSuite) TestInvalidPaths(c *check.C) { - contents := randomContents(64) - invalidFiles := []string{ - "", - "/", - "abc", - "123.abc", - "//bcd", - "/abc_123/"} - - for _, filename := range invalidFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - // only delete if file was successfully written - if err == nil { - defer suite.deletePath(c, firstPart(filename)) - } - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - } -} - -// TestWriteRead1 tests a simple write-read workflow. -func (suite *DriverSuite) TestWriteRead1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead2 tests a simple write-read workflow with unicode data.
-func (suite *DriverSuite) TestWriteRead2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead3 tests a simple write-read workflow with a small string. -func (suite *DriverSuite) TestWriteRead3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead4 tests a simple write-read workflow with 1MB of data. -func (suite *DriverSuite) TestWriteRead4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage -// driver safely. -func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompare(c, filename, contents) -} - -// TestTruncate tests that putting smaller contents than an original file does -// remove the excess contents. -func (suite *DriverSuite) TestTruncate(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) - - contents = randomContents(1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestReadNonexistent tests reading content from an empty path. -func (suite *DriverSuite) TestReadNonexistent(c *check.C) { - filename := randomPath(32) - _, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestWriteReadStreams1 tests a simple write-read streaming workflow. -func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams2 tests a simple write-read streaming workflow with -// unicode data. -func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams3 tests a simple write-read streaming workflow with a -// small amount of data. -func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB -// of data. -func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the -// storage driver safely. -func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage -// driver safely. 
-func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - checksum := sha1.New() - var fileSize int64 = 5 * 1024 * 1024 * 1024 - - contents := newRandReader(fileSize) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum)) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, fileSize) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - writtenChecksum := sha1.New() - io.Copy(writtenChecksum, reader) - - c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) -} - -// TestReadStreamWithOffset tests that the appropriate data is streamed when -// reading with a given offset. -func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - chunkSize := int64(32) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) - - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contentsChunk3) - - // Ensure we get an InvalidOffsetError for negative offsets. - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(reader, check.IsNil) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - // Read past the end of the content and make sure we get a reader that - // returns 0 bytes and io.EOF - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3) - c.Assert(err, check.IsNil) - defer reader.Close() - - buf := make([]byte, chunkSize) - n, err := reader.Read(buf) - c.Assert(err, check.Equals, io.EOF) - c.Assert(n, check.Equals, 0) - - // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1) - c.Assert(err, check.IsNil) - defer reader.Close() - - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 1) - - // We don't care whether the io.EOF comes on this read or the first - // zero read, but the only error acceptable here is io.EOF.
- if err != nil { - c.Assert(err, check.Equals, io.EOF) - } - - // Any more reads should result in zero bytes and io.EOF - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 0) - c.Assert(err, check.Equals, io.EOF) -} - -// TestContinueStreamAppendLarge tests that a stream write can be appended to without -// corrupting the data with a large chunk size. -func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { - suite.testContinueStreamAppend(c, int64(10*1024*1024)) -} - -// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only -// with a tiny chunk size in order to test corner cases for some cloud storage drivers. -func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { - suite.testContinueStreamAppend(c, int64(32)) -} - -func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - contentsChunk4 := randomContents(chunkSize) - zeroChunk := make([]byte, int64(chunkSize)) - - fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - // Test re-writing the last chunk - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, fullContents) - - // Writing past size of file extends file (no offset error). We would like - // to write chunk 4 one chunk length past chunk 3. It should be successful - // and the resulting file will be 5 chunks long, with a chunk of all - // zeros. - - fullContents = append(fullContents, zeroChunk...) - fullContents = append(fullContents, contentsChunk4...) 
- - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, chunkSize) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) - - received, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(len(received), check.Equals, len(fullContents)) - c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) - c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) - c.Assert(received, check.DeepEquals, fullContents) - - // Ensure that negative offsets return correct error. - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestReadNonexistentStream tests that reading a stream for a nonexistent path -// fails. -func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { - filename := randomPath(32) - - _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestList checks the returned list of keys after populating a directory tree. -func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.deletePath(c, rootDirectory) - - doesnotexist := path.Join(rootDirectory, "nonexistent") - _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) - c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ - Path: doesnotexist, - DriverName: suite.StorageDriver.Name(), - }) - - parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles := make([]string, 50) - for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles[i] = childFile - err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) - c.Assert(err, check.IsNil) - } - sort.Strings(childFiles) - - keys, err := suite.StorageDriver.List(suite.ctx, "/") - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{rootDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{parentDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) - c.Assert(err, check.IsNil) - - sort.Strings(keys) - c.Assert(keys, check.DeepEquals, childFiles) - - // A few checks to add here (check out #819 for more discussion on this): - // 1. Ensure that all paths are absolute. - // 2. Ensure that listings only include direct children. - // 3. 
Ensure that we only respond to directory listings that end with a slash (maybe?). -} - -// TestMove checks that a moved object no longer exists at the source path and -// does exist at the destination. -func (suite *DriverSuite) TestMove(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.deletePath(c, firstPart(sourcePath)) - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveOverwrite checks that a moved object no longer exists at the source -// path and overwrites the contents at the destination. -func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { - sourcePath := randomPath(32) - destPath := randomPath(32) - sourceContents := randomContents(32) - destContents := randomContents(64) - - defer suite.deletePath(c, firstPart(sourcePath)) - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, sourceContents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveNonexistent checks that moving a nonexistent key fails and does not -// delete the data at the destination path. -func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.deletePath(c, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) -} - -// TestMoveInvalid provides various checks for invalid moves. -func (suite *DriverSuite) TestMoveInvalid(c *check.C) { - contents := randomContents(32) - - // Create a regular file. - err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) - c.Assert(err, check.IsNil) - defer suite.deletePath(c, "/notadir") - - // Now try to move a non-existent file under it. 
- err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") - c.Assert(err, check.NotNil) // non-nil error -} - -// TestDelete checks that the delete operation removes data from the storage -// driver -func (suite *DriverSuite) TestDelete(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestURLFor checks that the URLFor method functions properly, but only if it -// is implemented -func (suite *DriverSuite) TestURLFor(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err := http.Get(url) - c.Assert(err, check.IsNil) - defer response.Body.Close() - - read, err := ioutil.ReadAll(response.Body) - c.Assert(err, check.IsNil) - c.Assert(read, check.DeepEquals, contents) - - url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err = http.Head(url) - c.Assert(response.StatusCode, check.Equals, 200) - c.Assert(response.ContentLength, check.Equals, int64(32)) -} - -// TestDeleteNonexistent checks that removing a nonexistent key fails. -func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { - filename := randomPath(32) - err := suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestDeleteFolder checks that deleting a folder removes all child elements. 
-func (suite *DriverSuite) TestDeleteFolder(c *check.C) { - dirname := randomPath(32) - filename1 := randomPath(32) - filename2 := randomPath(32) - filename3 := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(dirname)) - - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, dirname) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestStatCall verifies the implementation of the storagedriver's Stat call. -func (suite *DriverSuite) TestStatCall(c *check.C) { - content := randomContents(4096) - dirPath := randomPath(32) - fileName := randomFilename(32) - filePath := path.Join(dirPath, fileName) - - defer suite.deletePath(c, firstPart(dirPath)) - - // Call on non-existent file/dir, check error.
- fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - - // Call on regular file, check results - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, filePath) - c.Assert(fi.Size(), check.Equals, int64(len(content))) - c.Assert(fi.IsDir(), check.Equals, false) - createdTime := fi.ModTime() - - // Sleep and modify the file - time.Sleep(time.Second * 10) - content = randomContents(4096) - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) - - // Check if the modification time is after the creation time. - // In case of cloud storage services, storage frontend nodes might have - // time drift between them; however, that should be mitigated by sleeping - // before the update. - modTime := fi.ModTime() - if !modTime.After(createdTime) { - c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) - } - - // Call on directory (do not check ModTime as dirs don't need to support it) - fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, dirPath) - c.Assert(fi.Size(), check.Equals, int64(0)) - c.Assert(fi.IsDir(), check.Equals, true) -} - -// TestPutContentMultipleTimes checks that the storage driver can overwrite -// content in subsequent puts. Validates that PutContent does not have to work -// with an offset like WriteStream does and overwrites the file entirely -// rather than writing the data only to [0,len(data)) of the file. -func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { - filename := randomPath(32) - contents := randomContents(4096) - - defer suite.deletePath(c, firstPart(filename)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - contents = randomContents(2048) // upload a different, smaller file - err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents) -} -
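The overwrite contract that TestPutContentMultipleTimes enforces can be condensed to a few lines against any driver — a sketch only, where d is a concrete storagedriver.StorageDriver, ctx is the suite's context, and the bytes/fmt imports are assumed:

```go
// Sketch: a second PutContent must replace the file wholesale, not just
// overwrite its prefix; "/demo" is an illustrative path.
func overwriteContract(ctx context.Context, d storagedriver.StorageDriver) error {
	if err := d.PutContent(ctx, "/demo", bytes.Repeat([]byte{'a'}, 4096)); err != nil {
		return err
	}
	if err := d.PutContent(ctx, "/demo", []byte("short")); err != nil {
		return err
	}
	got, err := d.GetContent(ctx, "/demo")
	if err != nil {
		return err
	}
	if !bytes.Equal(got, []byte("short")) {
		return fmt.Errorf("stale bytes survived overwrite: got %d bytes", len(got))
	}
	return nil
}
```

-// TestConcurrentStreamReads checks that multiple clients can safely read from -// the same file simultaneously with various offsets.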
-func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { - var filesize int64 = 128 * 1024 * 1024 - - if testing.Short() { - filesize = 10 * 1024 * 1024 - c.Log("Reducing file size to 10MB for short mode") - } - - filename := randomPath(32) - contents := randomContents(filesize) - - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - var wg sync.WaitGroup - - readContents := func() { - defer wg.Done() - offset := rand.Int63n(int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents[offset:]) - } - - wg.Add(10) - for i := 0; i < 10; i++ { - go readContents() - } - wg.Wait() -} - -// TestConcurrentFileStreams checks that multiple *os.File objects can be passed -// in to WriteStream concurrently without hanging. -func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - numStreams := 32 - - if testing.Short() { - numStreams = 8 - c.Log("Reducing number of streams to 8 for short mode") - } - - var wg sync.WaitGroup - - testStream := func(size int64) { - defer wg.Done() - suite.testFileStreams(c, size) - } - - wg.Add(numStreams) - for i := numStreams; i > 0; i-- { - go testStream(int64(numStreams) * 1024 * 1024) - } - - wg.Wait() -} - -// TestEventualConsistency checks that if stat says that a file is a certain size, then -// you can freely read from the file (this is the only guarantee that the driver needs to provide) -func (suite *DriverSuite) TestEventualConsistency(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - var offset int64 - var misswrites int - var chunkSize int64 = 32 - - for i := 0; i < 1024; i++ { - contents := randomContents(chunkSize) - read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - - // We are most concerned with being able to read data as soon as Stat declares - // it is uploaded. This is the strongest guarantee that some drivers (that guarantee - // at best eventual consistency) absolutely need to provide. 
- if fi.Size() == offset+chunkSize { - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) - - reader.Close() - offset += read - } else { - misswrites++ - } - } - - if misswrites > 0 { - c.Logf("There were %d occurrences of a write not being instantly available.", misswrites) - } - - c.Assert(misswrites, check.Not(check.Equals), 1024) -} - -// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files -func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 0) -} - -// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files -func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024) -} - -// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files -func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024) -} - -// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files -func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - } -} - -// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files -func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 0) -} - -// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files -func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024) -} - -// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files -func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024) -} - -// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files -func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, size) - - rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - rc.Close() - } -} - -// BenchmarkList5Files benchmarks List for 5 small files -func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { - suite.benchmarkListFiles(c, 5) -} - -// BenchmarkList50Files benchmarks List for 50 small files -func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { - suite.benchmarkListFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64)
{ - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := int64(0); i < numFiles; i++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - - c.ResetTimer() - for i := 0; i < c.N; i++ { - files, err := suite.StorageDriver.List(suite.ctx, parentDir) - c.Assert(err, check.IsNil) - c.Assert(int64(len(files)), check.Equals, numFiles) - } -} - -// BenchmarkDelete5Files benchmarks Delete for 5 small files -func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 5) -} - -// BenchmarkDelete50Files benchmarks Delete for 50 small files -func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { - for i := 0; i < c.N; i++ { - parentDir := randomPath(8) - defer suite.deletePath(c, firstPart(parentDir)) - - c.StopTimer() - for j := int64(0); j < numFiles; j++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - c.StartTimer() - - // This is the operation we're benchmarking - err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - c.Assert(err, check.IsNil) - } -} - -func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { - tf, err := ioutil.TempFile("", "tf") - c.Assert(err, check.IsNil) - defer os.Remove(tf.Name()) - defer tf.Close() - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - contents := randomContents(size) - - _, err = tf.Write(contents) - c.Assert(err, check.IsNil) - - tf.Sync() - tf.Seek(0, os.SEEK_SET) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, size) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.deletePath(c, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.deletePath(c, firstPart(filename)) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contents))) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") -var separatorChars = []byte("._-") - -func randomPath(length int64) string { - path := "/" - for int64(len(path)) < length { - chunkLength := rand.Int63n(length-int64(len(path))) + 1 - chunk := randomFilename(chunkLength) - path += chunk - remaining := length - int64(len(path)) - if remaining == 1 
{ - path += randomFilename(1) - } else if remaining > 1 { - path += "/" - } - } - return path -} - -func randomFilename(length int64) string { - b := make([]byte, length) - wasSeparator := true - for i := range b { - if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { - b[i] = separatorChars[rand.Intn(len(separatorChars))] - wasSeparator = true - } else { - b[i] = filenameChars[rand.Intn(len(filenameChars))] - wasSeparator = false - } - } - return string(b) -} - -// randomBytes pre-allocates all of the memory sizes needed for the test. If -// anything panics while accessing randomBytes, just make this number bigger. -var randomBytes = make([]byte, 128<<20) - -func init() { - // increase the random bytes to the required maximum - for i := range randomBytes { - randomBytes[i] = byte(rand.Intn(2 << 8)) - } -} - -func randomContents(length int64) []byte { - return randomBytes[:length] -} - -type randReader struct { - r int64 - m sync.Mutex -} - -func (rr *randReader) Read(p []byte) (n int, err error) { - rr.m.Lock() - defer rr.m.Unlock() - - n = copy(p, randomContents(int64(len(p)))) - rr.r -= int64(n) - - if rr.r <= 0 { - err = io.EOF - } - - return -} - -func newRandReader(n int64) *randReader { - return &randReader{r: n} -} - -func firstPart(filePath string) string { - if filePath == "" { - return "/" - } - for { - if filePath[len(filePath)-1] == '/' { - filePath = filePath[:len(filePath)-1] - } - - dir, file := path.Split(filePath) - if dir == "" && file == "" { - return "/" - } - if dir == "/" || dir == "" { - return "/" + file - } - if file == "" { - return dir - } - filePath = dir - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go deleted file mode 100644 index b3a5f520..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go +++ /dev/null @@ -1,177 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): Set an optimal buffer size here. We'll have to -// understand the latency characteristics of the underlying network to -// set this correctly, so we may want to leave it to the driver. For -// out of process drivers, we'll have to optimize this buffer size for -// local communication. -const fileReaderBufferSize = 4 << 20 - -// remoteFileReader provides a read seeker interface to files stored in -// storagedriver. Used to implement part of layer interface and will be used -// to implement read side of LayerUpload. -type fileReader struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - size int64 // size is the total size, must be set. - - // mutable fields - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 // offset is the current read offset - err error // terminal error, if set, reader is closed -} - -// newFileReader initializes a file reader for the remote file. The reader -// takes on the size and path that must be determined externally with a stat -// call. The reader operates optimistically, assuming that the file is already -// there. 
-func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { - return &fileReader{ - ctx: ctx, - driver: driver, - path: path, - size: size, - }, nil -} - -func (fr *fileReader) Read(p []byte) (n int, err error) { - if fr.err != nil { - return 0, fr.err - } - - rd, err := fr.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - fr.offset += int64(n) - - // Simulate io.EOF if we reach the file size. - if err == nil && fr.offset >= fr.size { - err = io.EOF - } - - return n, err -} - -func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { - if fr.err != nil { - return 0, fr.err - } - - var err error - newOffset := fr.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fr.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if fr.offset != newOffset { - fr.reset() - } - - // No problems, set the offset. - fr.offset = newOffset - } - - return fr.offset, err -} - -func (fr *fileReader) Close() error { - return fr.closeWithErr(fmt.Errorf("fileReader: closed")) -} - -// reader prepares the current reader at the fileReader's offset, ensuring it's -// buffered and ready to go. -func (fr *fileReader) reader() (io.Reader, error) { - if fr.err != nil { - return nil, fr.err - } - - if fr.rc != nil { - return fr.brd, nil - } - - // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): If the path is not found, we simply return a - // reader that returns io.EOF. However, we do not set fr.rc, - // allowing future attempts at getting a reader to possibly - // succeed if the file turns up later. - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - default: - return nil, err - } - } - - fr.rc = rc - - if fr.brd == nil { - fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) - } else { - fr.brd.Reset(fr.rc) - } - - return fr.brd, nil -} - -// reset resets the reader, forcing the read method to open up a new -// connection and rebuild the buffered reader. This should be called when the -// offset and the reader will become out of sync, such as during a seek -// operation. -func (fr *fileReader) reset() { - if fr.err != nil { - return - } - if fr.rc != nil { - fr.rc.Close() - fr.rc = nil - } -} - -func (fr *fileReader) closeWithErr(err error) error { - if fr.err != nil { - return fr.err - } - - fr.err = err - - // close and release reader chain - if fr.rc != nil { - fr.rc.Close() - } - - fr.rc = nil - fr.brd = nil - - return fr.err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go deleted file mode 100644 index 529fa673..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go +++ /dev/null @@ -1,180 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -const ( - fileWriterBufferSize = 5 << 20 -) -
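Callers use this reader in two steps: Stat to learn the size, then reads that lazily open the backend stream. A hypothetical usage sketch ("/some/path" and the 16-byte tail are illustrative; os/ioutil imports assumed):

```go
// Hypothetical caller of newFileReader.
func readTail(ctx context.Context, d storagedriver.StorageDriver) ([]byte, error) {
	fi, err := d.Stat(ctx, "/some/path")
	if err != nil {
		return nil, err
	}
	fr, err := newFileReader(ctx, d, "/some/path", fi.Size())
	if err != nil {
		return nil, err
	}
	defer fr.Close()

	// Seek only records the offset and drops any open backend stream; the
	// driver's ReadStream is reopened at the new offset on the next Read.
	if _, err := fr.Seek(fi.Size()-16, os.SEEK_SET); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(fr)
}
```

-// fileWriter implements a remote file writer backed by a storage driver.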
-type fileWriter struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - - // mutable fields - size int64 // size of the file, aka the current end - offset int64 // offset is the current write offset - err error // terminal error, if set, writer is closed -} - -type bufferedFileWriter struct { - fileWriter - bw *bufio.Writer -} - -// fileWriterInterface makes the desired io compliant interface that the -// filewriter should implement. -type fileWriterInterface interface { - io.WriteSeeker - io.ReaderFrom - io.Closer -} - -var _ fileWriterInterface = &fileWriter{} - -// newFileWriter returns a prepared fileWriter for the driver and path. This -// could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { - fw := fileWriter{ - driver: driver, - path: path, - ctx: ctx, - } - - if fi, err := driver.Stat(ctx, path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot write to a directory") - } - - fw.size = fi.Size() - } - - buffered := bufferedFileWriter{ - fileWriter: fw, - } - buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) - - return &buffered, nil -} - -// wraps the fileWriter.Write method to buffer small writes -func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { - return bfw.bw.Write(p) -} - -// wraps fileWriter.Close to ensure the buffer is flushed -// before we close the writer. -func (bfw *bufferedFileWriter) Close() (err error) { - if err = bfw.Flush(); err != nil { - return err - } - err = bfw.fileWriter.Close() - return err -} - -// wraps fileWriter.Seek to ensure offset is handled -// correctly with respect to pending data in the buffer -func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { - if err := bfw.Flush(); err != nil { - return 0, err - } - return bfw.fileWriter.Seek(offset, whence) -} - -// wraps bufio.Writer.Flush to allow intermediate flushes -// of the bufferedFileWriter -func (bfw *bufferedFileWriter) Flush() error { - return bfw.bw.Flush() -} - -// Write writes the buffer p at the current write offset. -func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.ReadFrom(bytes.NewReader(p)) - return int(nn), err -} - -// ReadFrom reads reader r until io.EOF, writing the contents at the current -// offset. -func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) - - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. - fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - - return nn, err -} -
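The whence arithmetic shared by the Seek implementations in this file reduces to a small pure function, shown here only for clarity (not part of the original file):

```go
// Offset arithmetic equivalent to fileWriter.Seek / fileReader.Seek: on a
// negative target, the current offset is kept and an error is returned.
func seekTarget(cur, size, offset int64, whence int) (int64, error) {
	newOffset := cur
	switch whence {
	case os.SEEK_CUR:
		newOffset += offset
	case os.SEEK_END:
		newOffset = size + offset
	case os.SEEK_SET:
		newOffset = offset
	}
	if newOffset < 0 {
		return cur, fmt.Errorf("cannot seek to negative position")
	}
	return newOffset, nil
}
```

-// Seek moves the write position to the requested offset based on the whence -// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET.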
-func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { - if fw.err != nil { - return 0, fw.err - } - - var err error - newOffset := fw.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fw.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - // No problems, set the offset. - fw.offset = newOffset - } - - return fw.offset, err -} - -// Close closes the fileWriter for writing. -// Calling it once is valid and correct and it will -// return a nil error. Calling it subsequent times will -// detect that fw.err has been set and will return the error. -func (fw *fileWriter) Close() error { - if fw.err != nil { - return fw.err - } - - fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go deleted file mode 100644 index a1f8724d..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ /dev/null @@ -1,415 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// linkPathFunc describes a function that can resolve a link based on the -// repository name and digest. -type linkPathFunc func(name string, dgst digest.Digest) (string, error) - -// linkedBlobStore provides a full BlobService that namespaces the blobs to a -// given repository. Effectively, it manages the links in a given repository -// that grant access to the global blob store. -type linkedBlobStore struct { - *blobStore - registry *registry - blobServer distribution.BlobServer - blobAccessController distribution.BlobDescriptorService - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args - deleteEnabled bool - resumableDigestEnabled bool - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed and the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes.
- linkPathFns []linkPathFunc -} - -var _ distribution.BlobStore = &linkedBlobStore{} - -func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return lbs.blobAccessController.Stat(ctx, dgst) -} - -func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Get(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Open(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return err - } - - if canonical.MediaType != "" { - // Set the repository local content type. - w.Header().Set("Content-Type", canonical.MediaType) - } - - return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) -} - -func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - // Place the data in the blob store first. - desc, err := lbs.blobStore.Put(ctx, mediaType, p) - if err != nil { - context.GetLogger(ctx).Errorf("error putting into main store: %v", err) - return distribution.Descriptor{}, err - } - - if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype if incoming differs from what is - // returned by Put above. Note that we should allow updates for a given - // repository. - - return desc, lbs.linkBlob(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -// Create begins a blob write session, returning a handle.
-func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") - - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - if opts.Mount.ShouldMount { - desc, err := lbs.mount(ctx, opts.Mount.From.Name(), opts.Mount.From.Digest()) - if err == nil { - // Mount successful, no need to initiate an upload session - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - } - } - - uuid := uuid.Generate().String() - startedAt := time.Now().UTC() - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, uuid, path, startedAt) -} - -func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, id, path, startedAt) -} - -func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - if !lbs.deleteEnabled { - return distribution.ErrUnsupported - } - - // Ensure the blob is available for deletion - _, err := lbs.blobAccessController.Stat(ctx, dgst) - if err != nil { - return err - } - - err = lbs.blobAccessController.Clear(ctx, dgst) - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { - repo, err := lbs.registry.Repository(ctx, sourceRepo) - if err != nil { - return distribution.Descriptor{}, err - } - stat, err := repo.Blobs(ctx).Stat(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - desc := distribution.Descriptor{ - Size: stat.Size, - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - } - return desc, lbs.linkBlob(ctx, desc) -} - -// newBlobUpload allocates a new upload controller with the given state. 
-func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { - fw, err := newFileWriter(ctx, lbs.driver, path) - if err != nil { - return nil, err - } - - bw := &blobWriter{ - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - bufferedFileWriter: *fw, - resumableDigestEnabled: lbs.resumableDigestEnabled, - } - - return bw, nil -} - -// linkBlob links a valid, written blob into the registry under the named -// repository for the upload controller. -func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical.Digest}, aliases...) - - // TODO(stevvooe): Need to write out mediatype only for the canonical hash - // since we don't care about the aliases. They are generally unused except - // for tarsum but those versions don't care about mediatype. - - // Don't make duplicate links. - seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - // only use the first link - linkPathFn := lbs.linkPathFns[0] - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) - if err != nil { - return err - } - - if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { - return err - } - } - - return nil -} - -type linkedBlobStatter struct { - *blobStore - repository distribution.Repository - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed and the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc -} - -var _ distribution.BlobDescriptorService = &linkedBlobStatter{} - -func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - var ( - resolveErr error - target digest.Digest - ) - - // try the many link path functions until we get success or an error that - // is not PathNotFoundError. - for _, linkPathFn := range lbs.linkPathFns { - var err error - target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) - - if err == nil { - break // success! - } - - switch err := err.(type) { - case driver.PathNotFoundError: - resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error - default: - return distribution.Descriptor{}, err - } - } - - if resolveErr != nil { - return distribution.Descriptor{}, resolveErr - } - - if target != dgst { - // Track when we are doing cross-digest domain lookups. i.e., sha512 to sha256. - context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) - } - - // TODO(stevvooe): Look up repository local mediatype and replace that on - // the returned descriptor.
- - return lbs.blobStore.statter.Stat(ctx, target) -} - -func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { - // clear any possible existence of a link described in linkPathFns - for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) - if err != nil { - return err - } - - err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - continue // just ignore this error and continue - default: - return err - } - } - } - - return nil -} - -// resolveWithLinkFunc allows us to read a link to a resource with different -// linkPathFuncs to let us try a few different paths before returning not -// found. -func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) - if err != nil { - return "", err - } - - return lbs.blobStore.readlink(ctx, blobLinkPath) -} - -func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - // The canonical descriptor for a blob is set at the commit phase of upload - return nil -} - -// blobLinkPath provides the path to the blob link, also known as layers. -func blobLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(layerLinkPathSpec{name: name, digest: dgst}) -} - -// manifestRevisionLinkPath provides the path to the manifest revision link. -func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go deleted file mode 100644 index 42027d13..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go +++ /dev/null @@ -1,96 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" -) -
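The statter's fallback over multiple link path functions is the central pattern above; reduced to a standalone helper, it reads as follows (an illustration against the types in this file, not part of the original):

```go
// Illustration of linkedBlobStatter.Stat's fallback: each linkPathFunc is
// tried in order, and only a PathNotFoundError advances to the next one.
func resolveFirst(ctx context.Context, lbs *linkedBlobStatter, dgst digest.Digest) (digest.Digest, error) {
	for _, linkPathFn := range lbs.linkPathFns {
		target, err := lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
		if err == nil {
			return target, nil // success on the first resolvable link
		}
		if _, ok := err.(driver.PathNotFoundError); !ok {
			return "", err // a real failure stops the search immediately
		}
	}
	return "", distribution.ErrBlobUnknown
}
```

-// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.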
-type manifestListHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &manifestListHandler{} - -func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") - - var m manifestlist.DeserializedManifestList - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") - - m, ok := manifestList.(*manifestlist.DeserializedManifestList) - if !ok { - return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to -// store valid content, leaving trust policies of that content up to -// consumers. -func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - // This manifest service is different from the blob service - // returned by Blob. It uses a linked blob store to ensure that - // only manifests are accessible. - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - for _, manifestDescriptor := range mnfst.References() { - exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) - if err != nil && err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - if err != nil || !exists { - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go deleted file mode 100644 index 31daa83c..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go +++ /dev/null @@ -1,134 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" -) - -// A ManifestHandler gets and puts manifests of a particular type. -type ManifestHandler interface { - // Unmarshal unmarshals the manifest from a byte slice. 
- Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest. - Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) -} - -// SkipLayerVerification allows a manifest to be Put before its -// layers are on the filesystem -func SkipLayerVerification() distribution.ManifestServiceOption { - return skipLayerOption{} -} - -type skipLayerOption struct{} - -func (o skipLayerOption) Apply(m distribution.ManifestService) error { - if ms, ok := m.(*manifestStore); ok { - ms.skipDependencyVerification = true - return nil - } - return fmt.Errorf("skip layer verification only valid for manifestStore") -} - -type manifestStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - - skipDependencyVerification bool - - schema1Handler ManifestHandler - schema2Handler ManifestHandler - manifestListHandler ManifestHandler -} - -var _ distribution.ManifestService = &manifestStore{} - -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - - _, err := ms.blobStore.Stat(ms.ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store. - - content, err := ms.blobStore.Get(ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Name(), - Revision: dgst, - } - } - - return nil, err - } - - var versioned manifest.Versioned - if err = json.Unmarshal(content, &versioned); err != nil { - return nil, err - } - - switch versioned.SchemaVersion { - case 1: - return ms.schema1Handler.Unmarshal(ctx, dgst, content) - case 2: - // This can be an image manifest or a manifest list - switch versioned.MediaType { - case schema2.MediaTypeManifest: - return ms.schema2Handler.Unmarshal(ctx, dgst, content) - case manifestlist.MediaTypeManifestList: - return ms.manifestListHandler.Unmarshal(ctx, dgst, content) - default: - return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} - } - } - - return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) -} - -func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - - switch manifest.(type) { - case *schema1.SignedManifest: - return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *schema2.DeserializedManifest: - return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *manifestlist.DeserializedManifestList: - return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) - } - - return "", fmt.Errorf("unrecognized manifest type %T", manifest) -} - -// Delete removes the revision of the specified manifest.
-func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.blobStore.Delete(ctx, dgst) -} - -func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - return 0, distribution.ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go deleted file mode 100644 index 4d2d48c1..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go +++ /dev/null @@ -1,493 +0,0 @@ -package storage - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/distribution/digest" -) - -const ( - storagePathVersion = "v2" // fixed storage layout version - storagePathRoot = "/docker/registry/" // all driver paths have a prefix - - // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought - // the storage path root would be configurable for all drivers through this - // package. In reality, we've found it simpler to do this on a per driver - // basis. -) - -// pathFor maps paths based on "object names" and their ids. The "object -// names" it maps are internal to the storage system. - -// -// The path layout in the storage backend is roughly as follows: -// -// <root>/v2 -// -> repositories/ -// -> <name>/ -// -> _manifests/ -// revisions -// -> <manifest digest path> -// -> link -// -> signatures -// <algorithm>/<digest>/link -// tags/<tag> -// -> current/link -// -> index -// -> <algorithm>/<hex digest>/link -// -> _layers/ -// <layer links to blob store> -// -> _uploads/<id> -// data -// startedat -// hashstates/<algorithm>/<offset> -// -> blob/<algorithm> -// <split directory content addressable storage> -// -// The storage backend layout is broken up into a content-addressable blob -// store and repositories. The content-addressable blob store holds most data -// throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controlled through links from the -// repository to blobstore. -// -// A repository is made up of layers, manifests and tags. The layers component -// is just a directory of layers which are "linked" into a repository. A layer -// can only be accessed through a qualified repository name if it is linked in -// the repository. Uploads of layers are managed in the uploads directory, -// which is keyed by upload id. When all data for an upload is received, the -// data is moved into the blob store and the upload directory is deleted. -// Abandoned uploads can be garbage collected by reading the startedat file -// and removing uploads that have been active for longer than a certain time. -// -// The third component of the repository directory is the manifests store, -// which is made up of a revision store and tag store. Manifests are stored in -// the blob store and linked into the revision store. Signatures are separated -// from the manifest payload data and linked into the blob store, as well. -// While the registry can save all revisions of a manifest, no relationship is -// implied as to the ordering of changes to a manifest. The tag store provides -// support for name, tag lookups of manifests, using "current/link" under a -// named tag directory. An index is maintained to support deletions of all -// revisions of a given manifest tag. -// -// We cover the path formats implemented by this path mapper below.
-// -// Manifests: -// -// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/ -// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link -// manifestSignaturesPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/ -// manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link -// -// Tags: -// -// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/ -// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/ -// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link -// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/ -// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/ -// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link -// -// Blobs: -// -// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link -// -// Uploads: -// -// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data -// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat -// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset> -// -// Blob Store: -// -// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest> -// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data -// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data -// -// For more information on the semantic meaning of each path and their -// contents, please see the path spec documentation. -func pathFor(spec pathSpec) (string, error) { - - // Switch on the path object type and return the appropriate path. At - // first glance, one may wonder why we don't use an interface to - // accomplish this. By keeping the formatting separate from the pathSpec, - // we keep the path generation componentized. These specs could be - // passed to a completely different mapper implementation and generate a - // different set of paths. - // - // For example, imagine migrating from one backend to the other: one could - // build a filesystem walker that converts a string path in one version, - // to an intermediate path object, that can be consumed and mapped by the - // other version.
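As an illustrative aside to the path-spec table above, the following standalone sketch (not part of the deleted file) shows how those specs compose into concrete paths with path.Join; the repository name and digest are hypothetical:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Mirrors the rootPrefix/repoPrefix construction in paths.go above.
	repoPrefix := []string{"/docker/registry/", "v2", "repositories"}
	name := "library/ubuntu" // hypothetical repository name
	alg := "sha256"
	hex := "96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"

	// manifestRevisionLinkPathSpec
	fmt.Println(path.Join(append(repoPrefix, name, "_manifests", "revisions", alg, hex, "link")...))
	// layerLinkPathSpec
	fmt.Println(path.Join(append(repoPrefix, name, "_layers", alg, hex, "link")...))
	// blobDataPathSpec: multilevel, so the first two hex bytes group the digest
	fmt.Println(path.Join("/docker/registry/", "v2", "blobs", alg, hex[:2], hex, "data"))
}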
- - rootPrefix := []string{storagePathRoot, storagePathVersion} - repoPrefix := append(rootPrefix, "repositories") - - switch v := spec.(type) { - - case manifestRevisionPathSpec: - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil - case manifestRevisionLinkPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestSignaturesPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "signatures"), nil - case manifestSignatureLinkPathSpec: - root, err := pathFor(manifestSignaturesPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - signatureComponents, err := digestPathComponents(v.signature, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil - case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil - case manifestTagPathSpec: - root, err := pathFor(manifestTagsPathSpec{ - name: v.name, - }) - - if err != nil { - return "", err - } - - return path.Join(root, v.tag), nil - case manifestTagCurrentPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "current", "link"), nil - case manifestTagIndexPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "index"), nil - case manifestTagIndexEntryLinkPathSpec: - root, err := pathFor(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagIndexEntryPathSpec: - root, err := pathFor(manifestTagIndexPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(components...)), nil - case layerLinkPathSpec: - components, err := digestPathComponents(v.digest, false) - if err != nil { - return "", err - } - - // TODO(stevvooe): Right now, all blobs are linked under "_layers". If - // we have future migrations, we may want to rename this to "_blobs". - // A migration strategy would simply leave existing items in place and - // write the new paths, commit a file then delete the old files. 
- - blobLinkPathComponents := append(repoPrefix, v.name, "_layers") - - return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil - case blobDataPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - components = append(components, "data") - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - - case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil - case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil - case uploadHashStatePathSpec: - offset := fmt.Sprintf("%d", v.offset) - if v.list { - offset = "" // Limit to the prefix for listing offsets. - } - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil - case repositoriesRootPathSpec: - return path.Join(repoPrefix...), nil - default: - // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). - return "", fmt.Errorf("unknown path spec: %#v", v) - } -} - -// pathSpec is a type to mark structs as path specs. There is no -// implementation because we'd like to keep the specs and the mappers -// decoupled. -type pathSpec interface { - pathSpec() -} - -// manifestRevisionPathSpec describes the components of the directory path for -// a manifest revision. -type manifestRevisionPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionPathSpec) pathSpec() {} - -// manifestRevisionLinkPathSpec describes the path components required to look -// up the data link for a revision of a manifest. If this file is not present, -// the manifest blob is not available in the given repo. The contents of this -// file should just be the digest. -type manifestRevisionLinkPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionLinkPathSpec) pathSpec() {} - -// manifestSignaturesPathSpec describes the path components for the directory -// containing all the signatures for the target blob. Entries are named with -// the underlying key id. -type manifestSignaturesPathSpec struct { - name string - revision digest.Digest -} - -func (manifestSignaturesPathSpec) pathSpec() {} - -// manifestSignatureLinkPathSpec describes the path components used to look up -// a signature file by the hash of its blob. -type manifestSignatureLinkPathSpec struct { - name string - revision digest.Digest - signature digest.Digest -} - -func (manifestSignatureLinkPathSpec) pathSpec() {} - -// manifestTagsPathSpec describes the path elements required to point to the -// manifest tags directory. -type manifestTagsPathSpec struct { - name string -} - -func (manifestTagsPathSpec) pathSpec() {} - -// manifestTagPathSpec describes the path elements required to point to the -// manifest tag links files under a repository. These contain a blob id that -// can be used to look up the data and signatures. -type manifestTagPathSpec struct { - name string - tag string -} - -func (manifestTagPathSpec) pathSpec() {} - -// manifestTagCurrentPathSpec describes the link to the current revision for a -// given tag. -type manifestTagCurrentPathSpec struct { - name string - tag string -} - -func (manifestTagCurrentPathSpec) pathSpec() {} - -// manifestTagIndexPathSpec describes the link to the index of revisions -// with the given tag.
-type manifestTagIndexPathSpec struct { - name string - tag string -} - -func (manifestTagIndexPathSpec) pathSpec() {} - -// manifestTagIndexEntryPathSpec contains the entries of the index by revision. -type manifestTagIndexEntryPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryPathSpec) pathSpec() {} - -// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a -// manifest with the given tag within the index. -type manifestTagIndexEntryLinkPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} - -// layerLinkPathSpec specifies a path for a blob link, which is a file with a -// blob id. The blob link will contain a content addressable blob id reference -// into the blob store. The format of the contents is as follows: -// -// <algorithm>:<hex digest> -// -// The following example of the file contents is more illustrative: -// -// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 -// -// This indicates that there is a blob with the id/digest, calculated via -// sha256 that can be fetched from the blob store. -type layerLinkPathSpec struct { - name string - digest digest.Digest -} - -func (layerLinkPathSpec) pathSpec() {} - -// blobAlgorithmReplacer does some very simple path sanitization for user -// input. Paths should be "safe" before getting this far due to strict digest -// requirements but we can add further path conversion here, if needed. -var blobAlgorithmReplacer = strings.NewReplacer( - "+", "/", - ".", "/", - ";", "/", -) - -// // blobPathSpec contains the path for the registry global blob store. -// type blobPathSpec struct { -// digest digest.Digest -// } - -// func (blobPathSpec) pathSpec() {} - -// blobDataPathSpec contains the path for the registry global blob store. For -// now, this contains layer data, exclusively. -type blobDataPathSpec struct { - digest digest.Digest -} - -func (blobDataPathSpec) pathSpec() {} - -// uploadDataPathSpec defines the path parameters of the data file for -// uploads. -type uploadDataPathSpec struct { - name string - id string -} - -func (uploadDataPathSpec) pathSpec() {} - -// uploadStartedAtPathSpec defines the path parameters for the file that stores the -// start time of an upload. If it is missing, the upload is considered -// unknown. Admittedly, the presence of this file is an ugly hack to make sure -// we have a way to cleanup old or stalled uploads that doesn't rely on driver -// FileInfo behavior. If we come up with a more clever way to do this, we -// should remove this file immediately and rely on the startedAt field from -// the client to enforce time out policies. -type uploadStartedAtPathSpec struct { - name string - id string -} - -func (uploadStartedAtPathSpec) pathSpec() {} - -// uploadHashStatePathSpec defines the path parameters for the file that stores -// the hash function state of an upload at a specific byte offset. If `list` is -// set, then the path mapper will generate a list prefix for all hash state -// offsets for the upload identified by the name, id, and alg. -type uploadHashStatePathSpec struct { - name string - id string - alg digest.Algorithm - offset int64 - list bool -} - -func (uploadHashStatePathSpec) pathSpec() {} - -// repositoriesRootPathSpec returns the root of repositories -type repositoriesRootPathSpec struct { -} - -func (repositoriesRootPathSpec) pathSpec() {} - -// digestPathComponents provides a consistent path breakdown for a given -// digest.
For a generic digest, it will be as follows: -// -// <algorithm>/<hex digest> -// -// If multilevel is true, the first two bytes of the digest will separate -// groups of digest folders. It will be as follows: -// -// <algorithm>/<first two bytes of digest>/<full digest> -// -func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { - if err := dgst.Validate(); err != nil { - return nil, err - } - - algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) - hex := dgst.Hex() - prefix := []string{algorithm} - - var suffix []string - - if multilevel { - suffix = append(suffix, hex[:2]) - } - - suffix = append(suffix, hex) - - return append(prefix, suffix...), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go deleted file mode 100644 index 7576b189..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go +++ /dev/null @@ -1,139 +0,0 @@ -package storage - -import ( - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" -) - -// uploadData stores the location of temporary files created during a layer upload -// along with the date the upload was started -type uploadData struct { - containingDir string - startedAt time.Time -} - -func newUploadData() uploadData { - return uploadData{ - containingDir: "", - // default to far in future to protect against missing startedat - startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), - } -} - -// PurgeUploads deletes files from the upload directory -// created before olderThan. The list of files deleted and errors -// encountered are returned -func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { - log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(ctx, driver) - var deleted []string - for _, uploadData := range uploadData { - if uploadData.startedAt.Before(olderThan) { - var err error - log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", - uploadData.containingDir, uploadData.startedAt, olderThan) - if actuallyDelete { - err = driver.Delete(ctx, uploadData.containingDir) - } - if err == nil { - deleted = append(deleted, uploadData.containingDir) - } else { - errors = append(errors, err) - } - } - } - - log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) - return deleted, errors -} - -// getOutstandingUploads walks the upload directory, collecting files -// which could be eligible for deletion. The only reliable way to -// classify the age of a file is with the date stored in the startedAt -// file, so gather files by UUID with a date from startedAt.
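A standalone sketch of the expected output of digestPathComponents above; the real function also validates the digest via dgst.Validate and sanitizes the algorithm, while this toy version assumes a well-formed "<algorithm>:<hex>" string:

package main

import (
	"fmt"
	"strings"
)

func components(dgst string, multilevel bool) []string {
	parts := strings.SplitN(dgst, ":", 2) // assumes "<algorithm>:<hex>" form
	algorithm, hex := parts[0], parts[1]
	out := []string{algorithm}
	if multilevel {
		out = append(out, hex[:2]) // two-character grouping directory
	}
	return append(out, hex)
}

func main() {
	d := "sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"
	fmt.Println(components(d, false)) // [sha256 96443a84...]
	fmt.Println(components(d, true))  // [sha256 96 96443a84...]
}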
-func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { - var errors []error - uploads := make(map[string]uploadData, 0) - - inUploadDir := false - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return uploads, append(errors, err) - } - - err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { - filePath := fileInfo.Path() - _, file := path.Split(filePath) - if file[0] == '_' { - // Reserved directory - inUploadDir = (file == "_uploads") - - if fileInfo.IsDir() && !inUploadDir { - return ErrSkipDir - } - - } - - uuid, isContainingDir := uUIDFromPath(filePath) - if uuid == "" { - // Cannot reliably delete - return nil - } - ud, ok := uploads[uuid] - if !ok { - ud = newUploadData() - } - if isContainingDir { - ud.containingDir = filePath - } - if file == "startedat" { - if t, err := readStartedAtFile(driver, filePath); err == nil { - ud.startedAt = t - } else { - errors = pushError(errors, filePath, err) - } - - } - - uploads[uuid] = ud - return nil - }) - - if err != nil { - errors = pushError(errors, root, err) - } - return uploads, errors -} - -// uUIDFromPath extracts the upload UUID from a given path -// If the UUID is the last path component, this is the containing -// directory for all upload files -func uUIDFromPath(path string) (string, bool) { - components := strings.Split(path, "/") - for i := len(components) - 1; i >= 0; i-- { - if u, err := uuid.Parse(components[i]); err == nil { - return u.String(), i == len(components)-1 - } - } - return "", false -} - -// readStartedAtFile reads the date from an upload's startedAtFile -func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { - // todo:(richardscothern) - pass in a context - startedAtBytes, err := driver.GetContent(context.Background(), path) - if err != nil { - return time.Now(), err - } - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return time.Now(), err - } - return startedAt, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go deleted file mode 100644 index 869895dd..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go +++ /dev/null @@ -1,249 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// registry is the top-level implementation of Registry for use in the storage -// package. All instances should descend from this object. -type registry struct { - blobStore *blobStore - blobServer *blobServer - statter *blobStatter // global statter service. - blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider - deleteEnabled bool - resumableDigestEnabled bool -} - -// RegistryOption is the type used for functional options for NewRegistry. -type RegistryOption func(*registry) error - -// EnableRedirect is a functional option for NewRegistry. It causes the backend -// blob server to attempt using (StorageDriver).URLFor to serve all blobs. -func EnableRedirect(registry *registry) error { - registry.blobServer.redirect = true - return nil -} - -// EnableDelete is a functional option for NewRegistry. 
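A minimal sketch of how the PurgeUploads flow above is meant to be driven, dry run first; the one-week cutoff, the function name, and the driver argument are assumptions for illustration:

import (
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	storageDriver "github.com/docker/distribution/registry/storage/driver"
)

// purgeWeekOld removes upload directories older than one week, after a
// reporting-only pass (actuallyDelete=false logs candidates without deleting).
func purgeWeekOld(ctx context.Context, drv storageDriver.StorageDriver) ([]string, []error) {
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	storage.PurgeUploads(ctx, drv, cutoff, false) // dry run
	return storage.PurgeUploads(ctx, drv, cutoff, true)
}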
It enables deletion on -// the registry. -func EnableDelete(registry *registry) error { - registry.deleteEnabled = true - return nil -} - -// DisableDigestResumption is a functional option for NewRegistry. It should be -// used if the registry is acting as a caching proxy. -func DisableDigestResumption(registry *registry) error { - registry.resumableDigestEnabled = false - return nil -} - -// BlobDescriptorCacheProvider returns a functional option for -// NewRegistry. It creates a cached blob statter for use by the -// registry. -func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { - // TODO(aaronl): The duplication of statter across several objects is - // ugly, and prevents us from using interface types in the registry - // struct. Ideally, blobStore and blobServer should be lazily - // initialized, and use the current value of - // blobDescriptorCacheProvider. - return func(registry *registry) error { - if blobDescriptorCacheProvider != nil { - statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) - registry.blobStore.statter = statter - registry.blobServer.statter = statter - registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider - } - return nil - } -} - -// NewRegistry creates a new registry instance from the provided driver. The -// resulting registry may be shared by multiple goroutines but is cheap to -// allocate. If the Redirect option is specified, the backend blob server will -// attempt to use (StorageDriver).URLFor to serve all blobs. -func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { - // create global statter - statter := &blobStatter{ - driver: driver, - } - - bs := &blobStore{ - driver: driver, - statter: statter, - } - - registry := ®istry{ - blobStore: bs, - blobServer: &blobServer{ - driver: driver, - statter: statter, - pathFn: bs.path, - }, - statter: statter, - resumableDigestEnabled: true, - } - - for _, option := range options { - if err := option(registry); err != nil { - return nil, err - } - } - - return registry, nil -} - -// Scope returns the namespace scope for a registry. The registry -// will only serve repositories contained within this scope. -func (reg *registry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -// Repository returns an instance of the repository tied to the registry. -// Instances should not be shared between goroutines but are cheap to -// allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { - if _, err := reference.ParseNamed(canonicalName); err != nil { - return nil, distribution.ErrRepositoryNameInvalid{ - Name: canonicalName, - Reason: err, - } - } - - var descriptorCache distribution.BlobDescriptorService - if reg.blobDescriptorCacheProvider != nil { - var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) - if err != nil { - return nil, err - } - } - - return &repository{ - ctx: ctx, - registry: reg, - name: canonicalName, - descriptorCache: descriptorCache, - }, nil -} - -// repository provides name-scoped access to various services. -type repository struct { - *registry - ctx context.Context - name string - descriptorCache distribution.BlobDescriptorService -} - -// Name returns the name of the repository. 
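A minimal sketch of the functional-option pattern NewRegistry implements above, assuming the vendored inmemory storage driver is available; the repository name is hypothetical:

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	// EnableDelete and EnableRedirect match the RegistryOption signature and
	// toggle the corresponding registry behaviors at construction time.
	ns, err := storage.NewRegistry(ctx, inmemory.New(), storage.EnableDelete, storage.EnableRedirect)
	if err != nil {
		panic(err)
	}
	repo, err := ns.Repository(ctx, "library/hello")
	if err != nil {
		panic(err)
	}
	fmt.Println(repo.Name()) // library/hello
}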
-func (repo *repository) Name() string { - return repo.name -} - -func (repo *repository) Tags(ctx context.Context) distribution.TagService { - tags := &tagStore{ - repository: repo, - blobStore: repo.registry.blobStore, - } - - return tags -} - -// Manifests returns an instance of ManifestService. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - manifestLinkPathFns := []linkPathFunc{ - // NOTE(stevvooe): Need to search through multiple locations since - // 2.1.0 unintentionally linked into _layers. - manifestRevisionLinkPath, - blobLinkPath, - } - - blobStore := &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. - linkPathFns: manifestLinkPathFns, - } - - ms := &manifestStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - schema1Handler: &signedManifestHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - signatures: &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: repo.blobStore, - }, - }, - schema2Handler: &schema2ManifestHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - }, - manifestListHandler: &manifestListHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - }, - } - - // Apply options - for _, option := range options { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - - return ms, nil -} - -// Blobs returns an instance of the BlobStore. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { - var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: []linkPathFunc{blobLinkPath}, - } - - if repo.descriptorCache != nil { - statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) - } - - return &linkedBlobStore{ - registry: repo.registry, - blobStore: repo.blobStore, - blobServer: repo.blobServer, - blobAccessController: statter, - repository: repo, - ctx: ctx, - - // TODO(stevvooe): linkPath limits this blob store to only layers. - // This instance cannot be used for manifest checks. 
- linkPathFns: []linkPathFunc{blobLinkPath}, - deleteEnabled: repo.registry.deleteEnabled, - resumableDigestEnabled: repo.resumableDigestEnabled, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go deleted file mode 100644 index 115786e2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go +++ /dev/null @@ -1,99 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema2" -) - -//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. -type schema2ManifestHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -var _ ManifestHandler = &schema2ManifestHandler{} - -func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") - - var m schema2.DeserializedManifest - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") - - m, ok := manifest.(*schema2.DeserializedManifest) - if !ok { - return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to store -// valid content, leaving trust policies of that content up to consumers. -func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - target := mnfst.Target() - _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) - } - - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. 
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go deleted file mode 100644 index ede4e0e2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -import ( - "path" - "sync" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -type signatureStore struct { - repository *repository - blobStore *blobStore - ctx context.Context -} - -func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name(), - revision: dgst, - }) - - if err != nil { - return nil, err - } - - // Need to append signature digest algorithm to path to get all items. - // Perhaps, this should be in the pathMapper but it feels awkward. This - // can be eliminated by implementing listAll on drivers. - signaturesPath = path.Join(signaturesPath, "sha256") - - signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) - if err != nil { - return nil, err - } - - var wg sync.WaitGroup - type result struct { - index int - signature []byte - err error - } - ch := make(chan result) - - bs := s.linkedBlobStore(s.ctx, dgst) - for i, sigPath := range signaturePaths { - sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) - if err != nil { - context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) - continue - } - - wg.Add(1) - go func(idx int, sigdgst digest.Digest) { - defer wg.Done() - context.GetLogger(s.ctx). - Debugf("fetching signature %q", sigdgst) - - r := result{index: idx} - - if p, err := bs.Get(s.ctx, sigdgst); err != nil { - context.GetLogger(s.ctx). - Errorf("error fetching signature %q: %v", sigdgst, err) - r.err = err - } else { - r.signature = p - } - - ch <- r - }(i, sigdgst) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - // aggregate the results - signatures := make([][]byte, len(signaturePaths)) -loop: - for { - select { - case result := <-ch: - signatures[result.index] = result.signature - if result.err != nil && err == nil { - // only set the first one. - err = result.err - } - case <-done: - break loop - } - } - - return signatures, err -} - -func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { - bs := s.linkedBlobStore(s.ctx, dgst) - for _, signature := range signatures { - if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { - return err - } - } - return nil -} - -// linkedBlobStore returns the linkedBlobStore of the signatures for the -// manifest with the given digest. Effectively, each signature link path -// layout is a unique linked blob store.
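The Get method above fans out one goroutine per signature and reassembles results by index; here is a generic, standalone sketch of that fan-out/fan-in shape (the names are illustrative, not from the original code):

package main

import (
	"fmt"
	"sync"
)

// fetchAll retrieves all keys concurrently, preserves input order in the
// result slice, and reports the first error encountered, mirroring the
// aggregation loop in (*signatureStore).Get.
func fetchAll(keys []string, fetch func(string) (string, error)) ([]string, error) {
	type result struct {
		index int
		value string
		err   error
	}
	ch := make(chan result)
	var wg sync.WaitGroup
	for i, k := range keys {
		wg.Add(1)
		go func(i int, k string) {
			defer wg.Done()
			v, err := fetch(k)
			ch <- result{index: i, value: v, err: err}
		}(i, k)
	}
	go func() {
		wg.Wait()
		close(ch)
	}()

	out := make([]string, len(keys))
	var firstErr error
	for r := range ch {
		out[r.index] = r.value
		if r.err != nil && firstErr == nil {
			firstErr = r.err
		}
	}
	return out, firstErr
}

func main() {
	vals, err := fetchAll([]string{"a", "b", "c"}, func(k string) (string, error) {
		return "sig-" + k, nil
	})
	fmt.Println(vals, err) // [sig-a sig-b sig-c] <nil>
}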
-func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { - linkpath := func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestSignatureLinkPathSpec{ - name: name, - revision: revision, - signature: dgst, - }) - - } - - return &linkedBlobStore{ - ctx: ctx, - repository: s.repository, - blobStore: s.blobStore, - blobAccessController: &linkedBlobStatter{ - blobStore: s.blobStore, - repository: s.repository, - linkPathFns: []linkPathFunc{linkpath}, - }, - linkPathFns: []linkPathFunc{linkpath}, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go deleted file mode 100644 index 02663226..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go +++ /dev/null @@ -1,150 +0,0 @@ -package storage - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" -) - -// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It -// can unmarshal and put schema1 manifests that have been signed by libtrust. -type signedManifestHandler struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - signatures *signatureStore -} - -var _ ManifestHandler = &signedManifestHandler{} - -func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") - // Fetch the signatures for the manifest - signatures, err := ms.signatures.Get(dgst) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - return &sm, nil -} - -func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") - - sm, ok := manifest.(*schema1.SignedManifest) - if !ok { - return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { - return "", err - } - - mt := schema1.MediaTypeManifest - payload := sm.Canonical - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } - - if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. 
It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. -func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), - }) - } - - if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("invalid manifest name format"), - }) - } - - if len(mnfst.History) != len(mnfst.FSLayers) { - errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", - len(mnfst.History), len(mnfst.FSLayers))) - } - - if _, err := schema1.Verify(&mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - if !skipDependencyVerification { - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go deleted file mode 100644 index df6e8dfa..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go +++ /dev/null @@ -1,191 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var _ distribution.TagService = &tagStore{} - -// tagStore provides methods to manage manifest tags in a backend storage driver. -// This implementation uses the same on-disk layout as the (now deleted) tag -// store.
This provides backward compatibility with current registry deployments, -// which only make use of the Digest field of the returned distribution.Descriptor; -// it does not enable full roundtripping of Descriptor objects. -type tagStore struct { - repository *repository - blobStore *blobStore -} - -// All returns all tags -func (ts *tagStore) All(ctx context.Context) ([]string, error) { - var tags []string - - pathSpec, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), - }) - if err != nil { - return tags, err - } - - entries, err := ts.blobStore.driver.List(ctx, pathSpec) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} - default: - return tags, err - } - } - - for _, entry := range entries { - _, filename := path.Split(entry) - tags = append(tags, filename) - } - - return tags, nil -} - -// exists returns true if the specified manifest tag exists in the repository. -func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { - tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), - tag: tag, - }) - - if err != nil { - return false, err - } - - exists, err := exists(ctx, ts.blobStore.driver, tagPath) - if err != nil { - return false, err - } - - return exists, nil -} - -// Tag tags the digest with the given tag, updating the store to point at -// the current tag. The digest must point to a manifest. -func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), - tag: tag, - }) - - if err != nil { - return err - } - - lbs := ts.linkedBlobStore(ctx, tag) - - // Link into the index - if err := lbs.linkBlob(ctx, desc); err != nil { - return err - } - - // Overwrite the current link - return ts.blobStore.link(ctx, currentPath, desc.Digest) -} - -// Get resolves the current revision for name and tag. -func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), - tag: tag, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - revision, err := ts.blobStore.readlink(ctx, currentPath) - if err != nil { - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} - } - - return distribution.Descriptor{}, err - } - - return distribution.Descriptor{Digest: revision}, nil -} - -// Untag removes the tag association -func (ts *tagStore) Untag(ctx context.Context, tag string) error { - tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), - tag: tag, - }) - - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.ErrTagUnknown{Tag: tag} - case nil: - break - default: - return err - } - - return ts.blobStore.driver.Delete(ctx, tagPath) -} - -// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one -// to index manifest blobs by tag name. While the tag store doesn't map -// precisely to the linked blob store, using this ensures the links are -// managed via the same code path.
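A hedged sketch of the TagService round trip that the tagStore above implements; the tag name is arbitrary and error handling is minimal:

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// retagAndResolve points "latest" at dgst and reads it back. Tag writes both
// the tag's index link and current/link; Get follows current/link.
func retagAndResolve(ctx context.Context, repo distribution.Repository, dgst digest.Digest) (digest.Digest, error) {
	tags := repo.Tags(ctx)
	if err := tags.Tag(ctx, "latest", distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}
	desc, err := tags.Get(ctx, "latest")
	if err != nil {
		return "", err
	}
	return desc.Digest, nil
}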
-func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { - return &linkedBlobStore{ - blobStore: ts.blobStore, - repository: ts.repository, - ctx: ctx, - linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestTagIndexEntryLinkPathSpec{ - name: name, - tag: tag, - revision: dgst, - }) - - }}, - } -} - -// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by -// digest, tag entries which point to it need to be recovered to avoid dangling tags. -func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { - allTags, err := ts.All(ctx) - switch err.(type) { - case distribution.ErrRepositoryUnknown: - // This tag store has been initialized but not yet populated - break - case nil: - break - default: - return nil, err - } - - var tags []string - for _, tag := range allTags { - tagLinkPathSpec := manifestTagCurrentPathSpec{ - name: ts.repository.Name(), - tag: tag, - } - - tagLinkPath, err := pathFor(tagLinkPathSpec) - tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) - if err != nil { - return nil, err - } - - if tagDigest == desc.Digest { - tags = append(tags, tag) - } - } - - return tags, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go deleted file mode 100644 index 773d7ba0..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// Exists provides a utility method to test whether or not a path exists in -// the given driver. -func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { - if _, err := drv.Stat(ctx, path); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return false, nil - default: - return false, err - } - } - - return true, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go deleted file mode 100644 index 60d5a2fa..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go +++ /dev/null @@ -1,65 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// vacuum contains functions for cleaning up repositories and blobs -// These functions will only reliably work on strongly consistent -// storage systems. 
- https://en.wikipedia.org/wiki/Consistency_model - -// NewVacuum creates a new Vacuum -func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { - return Vacuum{ - ctx: ctx, - driver: driver, - } -} - -// Vacuum removes content from the filesystem -type Vacuum struct { - driver driver.StorageDriver - ctx context.Context -} - -// RemoveBlob removes a blob from the filesystem -func (v Vacuum) RemoveBlob(dgst string) error { - d, err := digest.ParseDigest(dgst) - if err != nil { - return err - } - - blobPath, err := pathFor(blobDataPathSpec{digest: d}) - if err != nil { - return err - } - context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) - err = v.driver.Delete(v.ctx, blobPath) - if err != nil { - return err - } - - return nil -} - -// RemoveRepository removes a repository directory from the -// filesystem -func (v Vacuum) RemoveRepository(repoName string) error { - rootForRepository, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return err - } - repoDir := path.Join(rootForRepository, repoName) - context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) - err = v.driver.Delete(v.ctx, repoDir) - if err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go deleted file mode 100644 index d979796e..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go +++ /dev/null @@ -1,59 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "sort" - - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" -) - -// ErrSkipDir is used as a return value from a WalkFn to indicate that -// the directory named in the call is to be skipped. It is not returned -// as an error by any function. -var ErrSkipDir = errors.New("skip this directory") - -// WalkFn is called once per file by Walk. -// If the returned error is ErrSkipDir and fileInfo refers -// to a directory, the directory will not be entered and Walk -// will continue the traversal. Otherwise Walk will return the error to its caller. -type WalkFn func(fileInfo storageDriver.FileInfo) error - -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { - children, err := driver.List(ctx, from) - if err != nil { - return err - } - sort.Stable(sort.StringSlice(children)) - for _, child := range children { - // TODO(stevvooe): Calling driver.Stat for every entry is quite - // expensive when running against backends with a slow Stat - // implementation, such as s3. This is very likely a serious - // performance bottleneck.
- fileInfo, err := driver.Stat(ctx, child) - if err != nil { - return err - } - err = f(fileInfo) - skipDir := (err == ErrSkipDir) - if err != nil && !skipDir { - return err - } - - if fileInfo.IsDir() && !skipDir { - if err := Walk(ctx, driver, child, f); err != nil { - return err - } - } - } - return nil -} - -// pushError formats an error type given a path and an error -// and pushes it to a slice of errors -func pushError(errors []error, path string, err error) []error { - return append(errors, fmt.Errorf("%s: %s", path, err)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go deleted file mode 100644 index 00cd8a6a..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go +++ /dev/null @@ -1,148 +0,0 @@ -package testutil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strings" -) - -// RequestResponseMap is an ordered mapping from Requests to Responses -type RequestResponseMap []RequestResponseMapping - -// RequestResponseMapping defines a Response to be sent in response to a given -// Request -type RequestResponseMapping struct { - Request Request - Response Response -} - -// Request is a simplified http.Request object -type Request struct { - // Method is the http method of the request, for example GET - Method string - - // Route is the http route of this request - Route string - - // QueryParams are the query parameters of this request - QueryParams map[string][]string - - // Body is the byte contents of the http request - Body []byte - - // Headers are the header for this request - Headers http.Header -} - -func (r Request) String() string { - queryString := "" - if len(r.QueryParams) > 0 { - keys := make([]string, 0, len(r.QueryParams)) - queryParts := make([]string, 0, len(r.QueryParams)) - for k := range r.QueryParams { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - for _, val := range r.QueryParams[k] { - queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) - } - } - queryString = "?" 
+ strings.Join(queryParts, "&") - } - var headers []string - if len(r.Headers) > 0 { - var headerKeys []string - for k := range r.Headers { - headerKeys = append(headerKeys, k) - } - sort.Strings(headerKeys) - - for _, k := range headerKeys { - for _, val := range r.Headers[k] { - headers = append(headers, fmt.Sprintf("%s:%s", k, val)) - } - } - - } - return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) -} - -// Response is a simplified http.Response object -type Response struct { - // Statuscode is the http status code of the Response - StatusCode int - - // Headers are the http headers of this Response - Headers http.Header - - // Body is the response body - Body []byte -} - -// testHandler is an http.Handler with a defined mapping from Request to an -// ordered list of Response objects -type testHandler struct { - responseMap map[string][]Response -} - -// NewHandler returns a new test handler that responds to defined requests -// with specified responses -// Each time a Request is received, the next Response is returned in the -// mapping, until no Responses are defined, at which point a 404 is sent back -func NewHandler(requestResponseMap RequestResponseMap) http.Handler { - responseMap := make(map[string][]Response) - for _, mapping := range requestResponseMap { - responses, ok := responseMap[mapping.Request.String()] - if ok { - responseMap[mapping.Request.String()] = append(responses, mapping.Response) - } else { - responseMap[mapping.Request.String()] = []Response{mapping.Response} - } - } - return &testHandler{responseMap: responseMap} -} - -func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - requestBody, _ := ioutil.ReadAll(r.Body) - request := Request{ - Method: r.Method, - Route: r.URL.Path, - QueryParams: r.URL.Query(), - Body: requestBody, - Headers: make(map[string][]string), - } - - // Add headers of interest here - for k, v := range r.Header { - if k == "If-None-Match" { - request.Headers[k] = v - } - } - - responses, ok := app.responseMap[request.String()] - - if !ok || len(responses) == 0 { - http.NotFound(w, r) - return - } - - response := responses[0] - app.responseMap[request.String()] = responses[1:] - - responseHeader := w.Header() - for k, v := range response.Headers { - responseHeader[k] = v - } - - w.WriteHeader(response.StatusCode) - - io.Copy(w, bytes.NewReader(response.Body)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go deleted file mode 100644 index 2c1d2d82..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go +++ /dev/null @@ -1,78 +0,0 @@ -package testutil - -import ( - "archive/tar" - "bytes" - "crypto/rand" - "fmt" - "io" - mrand "math/rand" - "time" - - "github.com/docker/distribution/digest" -) - -// CreateRandomTarFile creates a random tarfile, returning it as an -// io.ReadSeeker along with its digest. An error is returned if there is a -// problem generating valid content. -func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { - nFiles := mrand.Intn(10) + 10 - target := &bytes.Buffer{} - wr := tar.NewWriter(target) - - // Perturb this on each iteration of the loop below. 
- header := &tar.Header{ - Mode: 0644, - ModTime: time.Now(), - Typeflag: tar.TypeReg, - Uname: "randocalrissian", - Gname: "cloudcity", - AccessTime: time.Now(), - ChangeTime: time.Now(), - } - - for fileNumber := 0; fileNumber < nFiles; fileNumber++ { - fileSize := mrand.Int63n(1<<20) + 1<<20 - - header.Name = fmt.Sprint(fileNumber) - header.Size = fileSize - - if err := wr.WriteHeader(header); err != nil { - return nil, "", err - } - - randomData := make([]byte, fileSize) - - // Fill up the buffer with some random data. - n, err := rand.Read(randomData) - - if n != len(randomData) { - return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) - } - - if err != nil { - return nil, "", err - } - - nn, err := io.Copy(wr, bytes.NewReader(randomData)) - if nn != fileSize { - return nil, "", fmt.Errorf("short copy writing random file to tar") - } - - if err != nil { - return nil, "", err - } - - if err := wr.Flush(); err != nil { - return nil, "", err - } - } - - if err := wr.Close(); err != nil { - return nil, "", err - } - - dgst = digest.FromBytes(target.Bytes()) - - return bytes.NewReader(target.Bytes()), dgst, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/print.go b/Godeps/_workspace/src/github.com/docker/distribution/version/print.go deleted file mode 100644 index a82bce39..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/print.go +++ /dev/null @@ -1,26 +0,0 @@ -package version - -import ( - "fmt" - "io" - "os" -) - -// FprintVersion outputs the version string to the writer, in the following -// format, followed by a newline: -// -// <cmd> <project> <version> -// -// For example, a binary "registry" built from github.com/docker/distribution -// with version "v2.0" would print the following: -// -// registry github.com/docker/distribution v2.0 -// -func FprintVersion(w io.Writer) { - fmt.Fprintln(w, os.Args[0], Package, Version) -} - -// PrintVersion outputs the version information, from Fprint, to stdout. -func PrintVersion() { - FprintVersion(os.Stdout) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go deleted file mode 100644 index 450d15c2..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go +++ /dev/null @@ -1,11 +0,0 @@ -package version - -// Package is the overall, canonical project import path under which the -// package was built. -var Package = "github.com/docker/distribution" - -// Version indicates which version of the binary is running. This is set to -// the latest release tag by hand, always suffixed by "+unknown". During -// build, it will be replaced by the actual version. The value here will be -// used if the registry is run after a go get based install. -var Version = "v2.1.0+unknown" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh deleted file mode 100644 index 53e29ce9..00000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# This bash script outputs the current, desired content of version.go, using -# git describe. For best effect, pipe this to the target file. Generally, this -# only needs to be updated for releases. The actual value of <version> will be replaced -# during build time if the makefile is used. - -set -e - -cat <<EOF [...] - // For trusted pull on "FROM <image>" instruction.
- addTrustedFlags(cmd, true) - - cmd.ParseFlags(args, true) - - var ( - context io.ReadCloser - err error - ) - - specifiedContext := cmd.Arg(0) - - var ( - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - ) - - progBuff = cli.out - buildBuff = cli.out - if *suppressOutput { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - - switch { - case specifiedContext == "-": - context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) - case urlutil.IsURL(specifiedContext): - context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName) - default: - contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName) - } - - if err != nil { - if *suppressOutput && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(cli.err, progBuff) - } - return fmt.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - if context == nil { - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return err - } - - var excludes []string - if err == nil { - excludes, err = dockerignore.ReadAll(f) - if err != nil { - return err - } - } - - if err := validateContextDirectory(contextDir, excludes); err != nil { - return fmt.Errorf("Error checking context: '%s'.", err) - } - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. Ignore errors here, as they will have been - // caught by validateContextDirectory above. - var includes = []string{"."} - keepThem1, _ := fileutils.Matches(".dockerignore", excludes) - keepThem2, _ := fileutils.Matches(relDockerfile, excludes) - if keepThem1 || keepThem2 { - includes = append(includes, ".dockerignore", relDockerfile) - } - - context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: excludes, - IncludeFiles: includes, - }) - if err != nil { - return err - } - } - - var resolvedTags []*resolvedTag - if isTrusted() { - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. 
- context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags) - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - - var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon") - - var memory int64 - if *flMemoryString != "" { - parsedMemory, err := units.RAMInBytes(*flMemoryString) - if err != nil { - return err - } - memory = parsedMemory - } - - var memorySwap int64 - if *flMemorySwap != "" { - if *flMemorySwap == "-1" { - memorySwap = -1 - } else { - parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) - if err != nil { - return err - } - memorySwap = parsedMemorySwap - } - } - - var shmSize int64 - if *flShmSize != "" { - shmSize, err = units.RAMInBytes(*flShmSize) - if err != nil { - return err - } - } - - options := types.ImageBuildOptions{ - Context: body, - Memory: memory, - MemorySwap: memorySwap, - Tags: flTags.GetAll(), - SuppressOutput: *suppressOutput, - NoCache: *noCache, - Remove: *rm, - ForceRemove: *forceRm, - PullParent: *pull, - IsolationLevel: container.IsolationLevel(*isolation), - CPUSetCPUs: *flCPUSetCpus, - CPUSetMems: *flCPUSetMems, - CPUShares: *flCPUShares, - CPUQuota: *flCPUQuota, - CPUPeriod: *flCPUPeriod, - CgroupParent: *flCgroupParent, - Dockerfile: relDockerfile, - ShmSize: shmSize, - Ulimits: flUlimits.GetList(), - BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), - AuthConfigs: cli.configFile.AuthConfigs, - } - - response, err := cli.client.ImageBuild(options) - if err != nil { - return err - } - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if *suppressOutput { - fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff) - } - return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" { - fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if *suppressOutput { - fmt.Fprintf(cli.out, "%s", buildBuff) - } - - if isTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. 
- for _, resolved := range resolvedTags { - if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -// validateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. -// Symlinks which point to non-existent files don't trigger an error. -func validateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because opening them would hang - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// validateTag checks if the given image name can be resolved. -func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -// isUNC returns true if the path is UNC (one starting \\). It always returns -// false on Linux. -func isUNC(path string) bool { - return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) -} - -// getDockerfileRelPath uses the given context directory for a `docker build` -// and returns the absolute path to the context directory, the relative path of -// the dockerfile in that context directory, and an error, if any. -func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { - if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", fmt.Errorf("unable to get absolute context directory: %v", err) - } - - // The context dir might be a symbolic link, so follow it to the actual - // target directory. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to work around - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. - if !isUNC(absContextDir) { - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) - } - } - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) - } - - if !stat.IsDir() { - return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) - } - - absDockerfile := givenDockerfile - if absDockerfile == "" { - // No -f/--file was specified so use the default relative to the - // context directory.
- absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { - altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName)) - if _, err = os.Lstat(altPath); err == nil { - absDockerfile = altPath - } - } - } - - // If not already an absolute path, the Dockerfile path should be joined to - // the base directory. - if !filepath.IsAbs(absDockerfile) { - absDockerfile = filepath.Join(absContextDir, absDockerfile) - } - - // Evaluate symlinks in the path to the Dockerfile too. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. - if !isUNC(absDockerfile) { - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) - } - } - - if _, err := os.Lstat(absDockerfile); err != nil { - if os.IsNotExist(err) { - return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) - } - return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) - } - - if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { - return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) - } - - if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { - return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) - } - - return absContextDir, relDockerfile, nil -} - -// writeToFile copies from the given reader and writes it to a file with the -// given filename. -func writeToFile(r io.Reader, filename string) error { - file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to create file: %v", err) - } - defer file.Close() - - if _, err := io.Copy(file, r); err != nil { - return fmt.Errorf("unable to write file: %v", err) - } - - return nil -} - -// getContextFromReader will read the contents of the given reader as either a -// Dockerfile or tar archive. Returns a tar archive used as a context and a -// path to the Dockerfile inside the tar. -func getContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { - buf := bufio.NewReader(r) - - magic, err := buf.Peek(archive.HeaderSize) - if err != nil && err != io.EOF { - return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) - } - - if archive.IsArchive(magic) { - return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil - } - - // Input should be read as a Dockerfile. 
- tmpDir, err := ioutil.TempDir("", "docker-build-context-") - if err != nil { - return nil, "", fmt.Errorf("unable to create temporary context directory: %v", err) - } - - f, err := os.Create(filepath.Join(tmpDir, api.DefaultDockerfileName)) - if err != nil { - return nil, "", err - } - _, err = io.Copy(f, buf) - if err != nil { - f.Close() - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - if err := r.Close(); err != nil { - return nil, "", err - } - - tar, err := archive.Tar(tmpDir, archive.Uncompressed) - if err != nil { - return nil, "", err - } - - return ioutils.NewReadCloserWrapper(tar, func() error { - err := tar.Close() - os.RemoveAll(tmpDir) - return err - }), api.DefaultDockerfileName, nil - -} - -// getContextFromGitURL uses a Git URL as context for a `docker build`. The -// git repo is cloned into a temporary directory used as the context directory. -// Returns the absolute path to the temporary context directory, the relative -// path of the dockerfile in that context directory, and an error, if any. -func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { - if _, err := exec.LookPath("git"); err != nil { - return "", "", fmt.Errorf("unable to find 'git': %v", err) - } - if absContextDir, err = gitutils.Clone(gitURL); err != nil { - return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) - } - - return getDockerfileRelPath(absContextDir, dockerfileName) -} - -// getContextFromURL uses a remote URL as context for a `docker build`. The -// remote resource is downloaded as either a Dockerfile or a tar archive. -// Returns the tar archive used for the context and a path of the -// dockerfile inside the tar. -func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { - response, err := httputils.Download(remoteURL) - if err != nil { - return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) - } - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) - - // Pass the response body through a progress reader. - progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) - - return getContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) -} - -// getContextFromLocalDir uses the given local directory as context for a -// `docker build`. Returns the absolute path to the local context directory, -// the relative path of the dockerfile in that context directory, and an -// error, if any. -func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { - // When using a local context directory, if the Dockerfile is specified -// with the `-f/--file` option, it is considered relative to the - // current directory and not the context directory. - if dockerfileName != "" { - if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { - return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) - } - } - - return getDockerfileRelPath(localDir, dockerfileName) -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite.
-type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM <image>" instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. -func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. - for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - ref, err := reference.ParseNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.WithDefaultTag(ref) - if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { - trustedRef, err := translator(ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. -func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive. - tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - var content io.Reader = tarReader - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive.
- var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go deleted file mode 100644 index b1637b02..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go +++ /dev/null @@ -1,196 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "runtime" - - "github.com/docker/docker/api" - "github.com/docker/docker/cli" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/client" - "github.com/docker/go-connections/tlsconfig" -) - -// DockerCli represents the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - // initializing closure - init func() error - - // configFile has the client configuration file - configFile *cliconfig.ConfigFile - // in holds the input stream and closer (io.ReadCloser) for the client. - in io.ReadCloser - // out holds the output stream (io.Writer) for the client. - out io.Writer - // err holds the error stream (io.Writer) for the client. - err io.Writer - // keyFile holds the key file as a string. - keyFile string - // inFd holds the file descriptor of the client's STDIN (if valid). - inFd uintptr - // outFd holds file descriptor of the client's STDOUT (if valid). - outFd uintptr - // isTerminalIn indicates whether the client's STDIN is a TTY - isTerminalIn bool - // isTerminalOut indicates whether the client's STDOUT is a TTY - isTerminalOut bool - // client is the http client that performs all API operations - client client.APIClient - // state holds the terminal state - state *term.State -} - -// Initialize calls the init function that will set up the configuration for the client, -// such as TLS, TCP and other parameters used to run the client. -func (cli *DockerCli) Initialize() error { - if cli.init == nil { - return nil - } - return cli.init() -} - -// CheckTtyInput checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. -func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. - if ttyMode && attachStdin && !cli.isTerminalIn { - return errors.New("cannot enable tty mode on non tty input") - } - return nil -} - -// PsFormat returns the format string specified in the configuration. -// String contains columns and format specification, for example {{ID}}\t{{Name}}. -func (cli *DockerCli) PsFormat() string { - return cli.configFile.PsFormat -} - -// ImagesFormat returns the format string specified in the configuration. -// String contains columns and format specification, for example {{ID}}\t{{Name}}.
-func (cli *DockerCli) ImagesFormat() string { - return cli.configFile.ImagesFormat -} - -func (cli *DockerCli) setRawTerminal() error { - if cli.isTerminalIn && os.Getenv("NORAW") == "" { - state, err := term.SetRawTerminal(cli.inFd) - if err != nil { - return err - } - cli.state = state - } - return nil -} - -func (cli *DockerCli) restoreTerminal(in io.Closer) error { - if cli.state != nil { - term.RestoreTerminal(cli.inFd, cli.state) - } - if in != nil { - return in.Close() - } - return nil -} - -// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. -// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config -// is set the client scheme will be set to https. -// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). -func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { - cli := &DockerCli{ - in: in, - out: out, - err: err, - keyFile: clientFlags.Common.TrustKey, - } - - cli.init = func() error { - clientFlags.PostParse() - configFile, e := cliconfig.Load(cliconfig.ConfigDir()) - if e != nil { - fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) - } - cli.configFile = configFile - - host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) - if err != nil { - return err - } - - customHeaders := cli.configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" - - verStr := api.DefaultVersion.String() - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - clientTransport, err := newClientTransport(clientFlags.Common.TLSOptions) - if err != nil { - return err - } - - client, err := client.NewClient(host, verStr, clientTransport, customHeaders) - if err != nil { - return err - } - cli.client = client - - if cli.in != nil { - cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) - } - if cli.out != nil { - cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) - } - - return nil - } - - return cli -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - defaultHost := opts.DefaultTCPHost - if tlsOptions != nil { - defaultHost = opts.DefaultTLSHost - } - - host, err = opts.ParseHost(defaultHost, host) - return -} - -func newClientTransport(tlsOptions *tlsconfig.Options) (*http.Transport, error) { - if tlsOptions == nil { - return &http.Transport{}, nil - } - - config, err := tlsconfig.Client(*tlsOptions) - if err != nil { - return nil, err - } - return &http.Transport{ - TLSClientConfig: config, - }, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go deleted file mode 100644 index 4cfce5f6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/client.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package client provides a command-line interface for Docker. -// -// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. 
-// See https://docs.docker.com/installation/ for instructions on installing Docker. -package client diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go deleted file mode 100644 index 4c0d5c28..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/commit.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -// CmdCommit creates a new image from a container's changes. - -// - -// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] -func (cli *DockerCli) CmdCommit(args ...string) error { - cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) - flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") - flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") - flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")") - flChanges := opts.NewListOpts(nil) - cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") - // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. - flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") - cmd.Require(flag.Max, 2) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var ( - name = cmd.Arg(0) - repositoryAndTag = cmd.Arg(1) - repositoryName string - tag string - ) - - //Check if the given image name can be resolved - if repositoryAndTag != "" { - ref, err := reference.ParseNamed(repositoryAndTag) - if err != nil { - return err - } - - repositoryName = ref.Name() - - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot commit to digest reference") - case reference.NamedTagged: - tag = x.Tag() - } - } - - var config *container.Config - if *flConfig != "" { - config = &container.Config{} - if err := json.Unmarshal([]byte(*flConfig), config); err != nil { - return err - } - } - - options := types.ContainerCommitOptions{ - ContainerID: name, - RepositoryName: repositoryName, - Tag: tag, - Comment: *flComment, - Author: *flAuthor, - Changes: flChanges.GetAll(), - Pause: *flPause, - Config: config, - } - - response, err := cli.client.ContainerCommit(options) - if err != nil { - return err - } - - fmt.Fprintln(cli.out, response.ID) - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go deleted file mode 100644 index 43573d90..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/cp.go +++ /dev/null @@ -1,296 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/archive" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" -) - -type copyDirection int - -const ( - fromContainer copyDirection = (1 << iota) - toContainer - acrossContainers = fromContainer | toContainer -) - -type cpConfig struct { - followLink
bool -} - -// CmdCp copies files/folders to or from a path in a container. - -// - -// When copying from a container, if DEST_PATH is '-' the data is written as a -// tar archive file to STDOUT. - -// - -// When copying to a container, if SRC_PATH is '-' the data is read as a tar -// archive file from STDIN, and the destination CONTAINER:DEST_PATH, must specify -// a directory. - -// - -// Usage: -// docker cp CONTAINER:SRC_PATH DEST_PATH|- -// docker cp SRC_PATH|- CONTAINER:DEST_PATH -func (cli *DockerCli) CmdCp(args ...string) error { - cmd := Cli.Subcmd( - "cp", - []string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"}, - strings.Join([]string{ - Cli.DockerCommands["cp"].Description, - "\nUse '-' as the source to read a tar archive from stdin\n", - "and extract it to a directory destination in a container.\n", - "Use '-' as the destination to stream a tar archive of a\n", - "container source to stdout.", - }, ""), - true, - ) - - followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbolic link in SRC_PATH") - - cmd.Require(flag.Exact, 2) - cmd.ParseFlags(args, true) - - if cmd.Arg(0) == "" { - return fmt.Errorf("source cannot be empty") - } - if cmd.Arg(1) == "" { - return fmt.Errorf("destination cannot be empty") - } - - srcContainer, srcPath := splitCpArg(cmd.Arg(0)) - dstContainer, dstPath := splitCpArg(cmd.Arg(1)) - - var direction copyDirection - if srcContainer != "" { - direction |= fromContainer - } - if dstContainer != "" { - direction |= toContainer - } - - cpParam := &cpConfig{ - followLink: *followLink, - } - - switch direction { - case fromContainer: - return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) - case toContainer: - return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) - case acrossContainers: - // Copying between containers isn't supported. - return fmt.Errorf("copying between containers is not supported") - default: - // User didn't specify any container. - return fmt.Errorf("must specify at least one container source") - } -} - -// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be -// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by -// requiring a LOCALPATH with a `:` to be made explicit with a relative or -// absolute path: -// `/path/to/file:name.txt` or `./file:name.txt` -// -// This is apparently how `scp` handles this as well: -// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ -// -// We can't simply check for a filepath separator because container names may -// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, -// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows -// client, a `:` could be part of an absolute Windows path, in which case it -// is immediately preceded by a backslash. -func splitCpArg(arg string) (container, path string) { - if system.IsAbs(arg) { - // Explicit local absolute path, e.g., `C:\foo` or `/foo`. - return "", arg - } - - parts := strings.SplitN(arg, ":", 2) - - if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { - // Either there's no `:` in the arg - // OR it's an explicit local relative path like `./file:name.txt`.
- return "", arg - } - - return parts[0], parts[1] -} - -func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { - return cli.client.ContainerStatPath(containerName, path) -} - -func resolveLocalPath(localPath string) (absPath string, err error) { - if absPath, err = filepath.Abs(localPath); err != nil { - return - } - - return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil -} - -func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { - if dstPath != "-" { - // Get an absolute destination path. - dstPath, err = resolveLocalPath(dstPath) - if err != nil { - return err - } - } - - // if client requests to follow symbol link, then must decide target file to be copied - var rebaseName string - if cpParam.followLink { - srcStat, err := cli.statContainerPath(srcContainer, srcPath) - - // If the destination is a symbolic link, we should follow it. - if err == nil && srcStat.Mode&os.ModeSymlink != 0 { - linkTarget := srcStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - srcParent, _ := archive.SplitPathDirEntry(srcPath) - linkTarget = filepath.Join(srcParent, linkTarget) - } - - linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) - srcPath = linkTarget - } - - } - - content, stat, err := cli.client.CopyFromContainer(srcContainer, srcPath) - if err != nil { - return err - } - defer content.Close() - - if dstPath == "-" { - // Send the response to STDOUT. - _, err = io.Copy(os.Stdout, content) - - return err - } - - // Prepare source copy info. - srcInfo := archive.CopyInfo{ - Path: srcPath, - Exists: true, - IsDir: stat.Mode.IsDir(), - RebaseName: rebaseName, - } - - preArchive := content - if len(srcInfo.RebaseName) != 0 { - _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) - preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) - } - // See comments in the implementation of `archive.CopyTo` for exactly what - // goes into deciding how and whether the source archive needs to be - // altered for the correct copy behavior. - return archive.CopyTo(preArchive, srcInfo, dstPath) -} - -func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { - if srcPath != "-" { - // Get an absolute source path. - srcPath, err = resolveLocalPath(srcPath) - if err != nil { - return err - } - } - - // In order to get the copy behavior right, we need to know information - // about both the source and destination. The API is a simple tar - // archive/extract API but we can use the stat info header about the - // destination to be more informed about exactly what the destination is. - - // Prepare destination copy info by stat-ing the container path. - dstInfo := archive.CopyInfo{Path: dstPath} - dstStat, err := cli.statContainerPath(dstContainer, dstPath) - - // If the destination is a symbolic link, we should evaluate it. - if err == nil && dstStat.Mode&os.ModeSymlink != 0 { - linkTarget := dstStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := archive.SplitPathDirEntry(dstPath) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - dstInfo.Path = linkTarget - dstStat, err = cli.statContainerPath(dstContainer, linkTarget) - } - - // Ignore any error and assume that the parent directory of the destination - // path exists, in which case the copy may still succeed. 
If there is any - // type of conflict (e.g., non-directory overwriting an existing directory - // or vice versa) the extraction will fail. If the destination simply did - // not exist, but the parent directory does, the extraction will still - // succeed. - if err == nil { - dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() - } - - var ( - content io.Reader - resolvedDstPath string - ) - - if srcPath == "-" { - // Use STDIN. - content = os.Stdin - resolvedDstPath = dstInfo.Path - if !dstInfo.IsDir { - return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) - } - } else { - // Prepare source copy info. - srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) - if err != nil { - return err - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return err - } - defer srcArchive.Close() - - // With the stat info about the local source as well as the - // destination, we have enough information to know whether we need to - // alter the archive that we upload so that when the server extracts - // it to the specified directory in the container we get the desired - // copy behavior. - - // See comments in the implementation of `archive.PrepareArchiveCopy` - // for exactly what goes into deciding how and whether the source - // archive needs to be altered for the correct copy behavior when it is - // extracted. This function also infers from the source and destination - // info which directory to extract to, which may be the parent of the - // destination that the user specified. - dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return err - } - defer preparedArchive.Close() - - resolvedDstPath = dstDir - content = preparedArchive - } - - options := types.CopyToContainerOptions{ - ContainerID: dstContainer, - Path: resolvedDstPath, - Content: content, - AllowOverwriteDirWithFile: false, - } - - return cli.client.CopyToContainer(options) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go deleted file mode 100644 index 3cf8a7a7..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/create.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -func (cli *DockerCli) pullImage(image string) error { - return cli.pullImageCustomOut(image, cli.out) -} - -func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - var tag string - switch x := reference.WithDefaultTag(ref).(type) { - case reference.Canonical: - tag = x.Digest().String() - case reference.NamedTagged: - tag = x.Tag() - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - // Resolve the Auth config relevant for this server - encodedAuth, err := cli.encodeRegistryAuth(repoInfo.Index) - if err != nil { - return err - } - - options := 
types.ImageCreateOptions{ - Parent: ref.Name(), - Tag: tag, - RegistryAuth: encodedAuth, - } - - responseBody, err := cli.client.ImageCreate(options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil) -} - -type cidFile struct { - path string - file *os.File - written bool -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { - var containerIDFile *cidFile - if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - ref, err := reference.ParseNamed(config.Image) - if err != nil { - return nil, err - } - ref = reference.WithDefaultTag(ref) - - var trustedRef reference.Canonical - - if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { - var err error - trustedRef, err = cli.trustedReference(ref) - if err != nil { - return nil, err - } - config.Image = trustedRef.String() - } - - //create the container - response, err := cli.client.ContainerCreate(config, hostConfig, networkingConfig, name) - - //if image not found try to pull it - if err != nil { - if client.IsErrImageNotFound(err) { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) - - // we don't want to write to stdout anything apart from container.ID - if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { - return nil, err - } - if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { - if err := cli.tagTrusted(trustedRef, ref); err != nil { - return nil, err - } - } - // Retry - var retryErr error - response, retryErr = cli.client.ContainerCreate(config, hostConfig, networkingConfig, name) - if retryErr != nil { - return nil, retryErr - } - } else { - return nil, err - } - } - - for _, warning := range response.Warnings { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} - -// CmdCreate creates a new container from a given image. -// -// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
-func (cli *DockerCli) CmdCreate(args ...string) error { - cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) - addTrustedFlags(cmd, true) - - // These are flags not stored in Config/HostConfig - var ( - flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") - ) - - config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) - - if err != nil { - cmd.ReportError(err.Error(), true) - os.Exit(1) - } - if config.Image == "" { - cmd.Usage() - return nil - } - response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "%s\n", response.ID) - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go deleted file mode 100644 index f180e001..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/diff.go +++ /dev/null @@ -1,47 +0,0 @@ -package client - -import ( - "fmt" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/archive" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdDiff shows changes on a container's filesystem. -// -// Each changed file is printed on a separate line, prefixed with a single -// character that indicates the status of the file: C (modified), A (added), -// or D (deleted). -// -// Usage: docker diff CONTAINER -func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - if cmd.Arg(0) == "" { - return fmt.Errorf("Container name cannot be empty") - } - - changes, err := cli.client.ContainerDiff(cmd.Arg(0)) - if err != nil { - return err - } - - for _, change := range changes { - var kind string - switch change.Kind { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go deleted file mode 100644 index c2a6ab3b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/events.go +++ /dev/null @@ -1,108 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonlog" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" - eventtypes "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" -) - -// CmdEvents prints a live stream of real time events from the server. 
-// -// Usage: docker events [OPTIONS] -func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) - since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp") - until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - cmd.Require(flag.Exact, 0) - - cmd.ParseFlags(args, true) - - eventFilterArgs := filters.NewArgs() - - // Consolidate all filter flags, and sanity check them early. - // They'll get processed in the daemon/server. - for _, f := range flFilter.GetAll() { - var err error - eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) - if err != nil { - return err - } - } - - options := types.EventsOptions{ - Since: *since, - Until: *until, - Filters: eventFilterArgs, - } - - responseBody, err := cli.client.Events(options) - if err != nil { - return err - } - defer responseBody.Close() - - return streamEvents(responseBody, cli.out) -} - -// streamEvents decodes and prints the incoming events to the provided output. -func streamEvents(input io.Reader, output io.Writer) error { - return decodeEvents(input, func(event eventtypes.Message, err error) error { - if err != nil { - return err - } - printOutput(event, output) - return nil - }) -} - -type eventProcessor func(event eventtypes.Message, err error) error - -func decodeEvents(input io.Reader, ep eventProcessor) error { - dec := json.NewDecoder(input) - for { - var event eventtypes.Message - err := dec.Decode(&event) - if err != nil && err == io.EOF { - break - } - - if procErr := ep(event, err); procErr != nil { - return procErr - } - } - return nil -} - -// printOutput prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any. -func printOutput(event eventtypes.Message, output io.Writer) { - if event.TimeNano != 0 { - fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if event.Time != 0 { - fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - - fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) - - if len(event.Actor.Attributes) > 0 { - var attrs []string - for k, v := range event.Actor.Attributes { - attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) - } - fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) - } - fmt.Fprint(output, "\n") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go deleted file mode 100644 index 68b7c16e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/exec.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "fmt" - "io" - - "github.com/Sirupsen/logrus" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/promise" - "github.com/docker/engine-api/types" -) - -// CmdExec runs a command in a running container. - -// - -// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
-func (cli *DockerCli) CmdExec(args ...string) error { - cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) - detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") - - execConfig, err := ParseExec(cmd, args) - // just in case the ParseExec does not exit - if execConfig.Container == "" || err != nil { - return Cli.StatusError{StatusCode: 1} - } - - if *detachKeys != "" { - cli.configFile.DetachKeys = *detachKeys - } - - // Send client escape keys - execConfig.DetachKeys = cli.configFile.DetachKeys - - response, err := cli.client.ContainerExecCreate(*execConfig) - if err != nil { - return err - } - - execID := response.ID - if execID == "" { - fmt.Fprintf(cli.out, "exec ID empty") - return nil - } - - //Temp struct for execStart so that we don't need to transfer all the execConfig - if !execConfig.Detach { - if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - execStartCheck := types.ExecStartCheck{ - Detach: execConfig.Detach, - Tty: execConfig.Tty, - } - - if err := cli.client.ContainerExecStart(execID, execStartCheck); err != nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(cli.out, "%s\n", execID) - return nil - } - - // Interactive exec requested. - var ( - out, stderr io.Writer - in io.ReadCloser - errCh chan error - ) - - if execConfig.AttachStdin { - in = cli.in - } - if execConfig.AttachStdout { - out = cli.out - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - resp, err := cli.client.ContainerExecAttach(execID, *execConfig) - if err != nil { - return err - } - defer resp.Close() - if in != nil && execConfig.Tty { - if err := cli.setRawTerminal(); err != nil { - return err - } - defer cli.restoreTerminal(in) - } - errCh = promise.Go(func() error { - return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) - }) - - if execConfig.Tty && cli.isTerminalIn { - if err := cli.monitorTtySize(execID, true); err != nil { - fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) - } - } - - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = getExecExitCode(cli, execID); err != nil { - return err - } - - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - - return nil -} - -// ParseExec parses the specified args for the specified command and generates -// an ExecConfig from it. -// If the minimal number of specified args is not right or if specified args are -// not valid, it will return an error. 
-func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { - var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") - flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") - execCmd []string - container string - ) - cmd.Require(flag.Min, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return nil, err - } - container = cmd.Arg(0) - parsedArgs := cmd.Args() - execCmd = parsedArgs[1:] - - execConfig := &types.ExecConfig{ - User: *flUser, - Privileged: *flPrivileged, - Tty: *flTty, - Cmd: execCmd, - Container: container, - Detach: *flDetach, - } - - // If -d is not set, attach to everything by default - if !*flDetach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if *flStdin { - execConfig.AttachStdin = true - } - } - - return execConfig, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go deleted file mode 100644 index ec94f672..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/export.go +++ /dev/null @@ -1,45 +0,0 @@ -package client - -import ( - "errors" - "io" - "os" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdExport exports a filesystem as a tar archive. - -// - -// The tar archive is streamed to STDOUT by default or written to a file. - -// - -// Usage: docker export [OPTIONS] CONTAINER -func (cli *DockerCli) CmdExport(args ...string) error { - cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) - outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - var ( - output = cli.out - err error - ) - if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { - return err - } - } else if cli.isTerminalOut { - return errors.New("Cowardly refusing to save to a terminal.
Use the -o flag or redirect.") - } - - responseBody, err := cli.client.ContainerExport(cmd.Arg(0)) - if err != nil { - return err - } - defer responseBody.Close() - - _, err = io.Copy(output, responseBody) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go deleted file mode 100644 index 8a680705..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/custom.go +++ /dev/null @@ -1,222 +0,0 @@ -package formatter - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" -) - -const ( - tableKey = "table" - - containerIDHeader = "CONTAINER ID" - imageHeader = "IMAGE" - namesHeader = "NAMES" - commandHeader = "COMMAND" - createdSinceHeader = "CREATED" - createdAtHeader = "CREATED AT" - runningForHeader = "CREATED" - statusHeader = "STATUS" - portsHeader = "PORTS" - sizeHeader = "SIZE" - labelsHeader = "LABELS" - imageIDHeader = "IMAGE ID" - repositoryHeader = "REPOSITORY" - tagHeader = "TAG" - digestHeader = "DIGEST" -) - -type containerContext struct { - baseSubContext - trunc bool - c types.Container -} - -func (c *containerContext) ID() string { - c.addHeader(containerIDHeader) - if c.trunc { - return stringid.TruncateID(c.c.ID) - } - return c.c.ID -} - -func (c *containerContext) Names() string { - c.addHeader(namesHeader) - names := stripNamePrefix(c.c.Names) - if c.trunc { - for _, name := range names { - if len(strings.Split(name, "/")) == 1 { - names = []string{name} - break - } - } - } - return strings.Join(names, ",") -} - -func (c *containerContext) Image() string { - c.addHeader(imageHeader) - if c.c.Image == "" { - return "" - } - if c.trunc { - if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { - return trunc - } - } - return c.c.Image -} - -func (c *containerContext) Command() string { - c.addHeader(commandHeader) - command := c.c.Command - if c.trunc { - command = stringutils.Truncate(command, 20) - } - return strconv.Quote(command) -} - -func (c *containerContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.c.Created), 0).String() -} - -func (c *containerContext) RunningFor() string { - c.addHeader(runningForHeader) - createdAt := time.Unix(int64(c.c.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *containerContext) Ports() string { - c.addHeader(portsHeader) - return api.DisplayablePorts(c.c.Ports) -} - -func (c *containerContext) Status() string { - c.addHeader(statusHeader) - return c.c.Status -} - -func (c *containerContext) Size() string { - c.addHeader(sizeHeader) - srw := units.HumanSize(float64(c.c.SizeRw)) - sv := units.HumanSize(float64(c.c.SizeRootFs)) - - sf := srw - if c.c.SizeRootFs > 0 { - sf = fmt.Sprintf("%s (virtual %s)", srw, sv) - } - return sf -} - -func (c *containerContext) Labels() string { - c.addHeader(labelsHeader) - if c.c.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.c.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *containerContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - 
c.addHeader(h) - - if c.c.Labels == nil { - return "" - } - return c.c.Labels[name] -} - -type imageContext struct { - baseSubContext - trunc bool - i types.Image - repo string - tag string - digest string -} - -func (c *imageContext) ID() string { - c.addHeader(imageIDHeader) - if c.trunc { - return stringid.TruncateID(c.i.ID) - } - return c.i.ID -} - -func (c *imageContext) Repository() string { - c.addHeader(repositoryHeader) - return c.repo -} - -func (c *imageContext) Tag() string { - c.addHeader(tagHeader) - return c.tag -} - -func (c *imageContext) Digest() string { - c.addHeader(digestHeader) - return c.digest -} - -func (c *imageContext) CreatedSince() string { - c.addHeader(createdSinceHeader) - createdAt := time.Unix(int64(c.i.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *imageContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.i.Created), 0).String() -} - -func (c *imageContext) Size() string { - c.addHeader(sizeHeader) - return units.HumanSize(float64(c.i.Size)) -} - -type subContext interface { - fullHeader() string - addHeader(header string) -} - -type baseSubContext struct { - header []string -} - -func (c *baseSubContext) fullHeader() string { - if c.header == nil { - return "" - } - return strings.Join(c.header, "\t") -} - -func (c *baseSubContext) addHeader(header string) { - if c.header == nil { - c.header = []string{} - } - c.header = append(c.header, strings.ToUpper(header)) -} - -func stripNamePrefix(ss []string) []string { - for i, s := range ss { - ss[i] = s[1:] - } - - return ss -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go deleted file mode 100644 index 749148ac..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/formatter/formatter.go +++ /dev/null @@ -1,254 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "io" - "strings" - "text/tabwriter" - "text/template" - - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -const ( - tableFormatKey = "table" - rawFormatKey = "raw" - - defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" - defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultQuietFormat = "{{.ID}}" -) - -// Context contains information required by the formatter to print the output as desired. -type Context struct { - // Output is the output stream to which the formatted string is written. - Output io.Writer - // Format is used to choose raw, table or custom format for the output. - Format string - // Quiet when set to true will simply print minimal information. - Quiet bool - // Trunc when set to true will truncate the output of certain fields such as Container ID. 
- Trunc bool - - // internal element - table bool - finalFormat string - header string - buffer *bytes.Buffer -} - -func (c *Context) preformat() { - c.finalFormat = c.Format - - if strings.HasPrefix(c.Format, tableKey) { - c.table = true - c.finalFormat = c.finalFormat[len(tableKey):] - } - - c.finalFormat = strings.Trim(c.finalFormat, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - c.finalFormat = r.Replace(c.finalFormat) -} - -func (c *Context) parseFormat() (*template.Template, error) { - tmpl, err := template.New("").Parse(c.finalFormat) - if err != nil { - c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - } - return tmpl, err -} - -func (c *Context) postformat(tmpl *template.Template, subContext subContext) { - if c.table { - if len(c.header) == 0 { - // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template - tmpl.Execute(bytes.NewBufferString(""), subContext) - c.header = subContext.fullHeader() - } - - t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) - t.Write([]byte(c.header)) - t.Write([]byte("\n")) - c.buffer.WriteTo(t) - t.Flush() - } else { - c.buffer.WriteTo(c.Output) - } -} - -func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { - if err := tmpl.Execute(c.buffer, subContext); err != nil { - c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - return err - } - if c.table && len(c.header) == 0 { - c.header = subContext.fullHeader() - } - c.buffer.WriteString("\n") - return nil -} - -// ContainerContext contains container specific information required by the formater, encapsulate a Context struct. -type ContainerContext struct { - Context - // Size when set to true will display the size of the output. - Size bool - // Containers - Containers []types.Container -} - -// ImageContext contains image specific information required by the formater, encapsulate a Context struct. 
-type ImageContext struct { - Context - Digest bool - // Images - Images []types.Image -} - -func (ctx ContainerContext) Write() { - switch ctx.Format { - case tableFormatKey: - ctx.Format = defaultContainerTableFormat - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `container_id: {{.ID}}` - } else { - ctx.Format = `container_id: {{.ID}} -image: {{.Image}} -command: {{.Command}} -created_at: {{.CreatedAt}} -status: {{.Status}} -names: {{.Names}} -labels: {{.Labels}} -ports: {{.Ports}} -` - if ctx.Size { - ctx.Format += `size: {{.Size}} -` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - if ctx.table && ctx.Size { - ctx.finalFormat += "\t{{.Size}}" - } - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, container := range ctx.Containers { - containerCtx := &containerContext{ - trunc: ctx.Trunc, - c: container, - } - err = ctx.contextFormat(tmpl, containerCtx) - if err != nil { - return - } - } - - ctx.postformat(tmpl, &containerContext{}) -} - -func (ctx ImageContext) Write() { - switch ctx.Format { - case tableFormatKey: - ctx.Format = defaultImageTableFormat - if ctx.Digest { - ctx.Format = defaultImageTableFormatWithDigest - } - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `image_id: {{.ID}}` - } else { - if ctx.Digest { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -digest: {{.Digest}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } else { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { - ctx.finalFormat += "\t{{.Digest}}" - } - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, image := range ctx.Images { - - repoTags := image.RepoTags - repoDigests := image.RepoDigests - - if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { - // dangling image - clear out either repoTags or repoDigests so we only show it once below - repoDigests = []string{} - } - // combine the tags and digests lists - tagsAndDigests := append(repoTags, repoDigests...) 
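The loop that follows leans on docker's reference package to split each combined entry into repository, tag and digest. A rough, standard-library-only sketch of the same split; parseRepoRef is a hypothetical helper, not part of the vendored code, and it skips the validation that reference.ParseNamed performs:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseRepoRef splits "repo:tag" or "repo@digest" into its parts.
    func parseRepoRef(s string) (repo, tag, digest string) {
        if i := strings.Index(s, "@"); i >= 0 {
            return s[:i], "", s[i+1:]
        }
        // Split on the last colon, but only when what follows is not a
        // registry port as in "host:5000/img".
        if i := strings.LastIndex(s, ":"); i >= 0 && !strings.Contains(s[i+1:], "/") {
            return s[:i], s[i+1:], ""
        }
        return s, "", ""
    }

    func main() {
        for _, ref := range []string{"busybox:latest", "alpine@sha256:e4f0", "host:5000/img:v1"} {
            repo, tag, digest := parseRepoRef(ref)
            fmt.Printf("repo=%q tag=%q digest=%q\n", repo, tag, digest)
        }
    }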
- for _, repoAndRef := range tagsAndDigests { - repo := "" - tag := "" - digest := "" - - if !strings.HasPrefix(repoAndRef, "") { - ref, err := reference.ParseNamed(repoAndRef) - if err != nil { - continue - } - repo = ref.Name() - - switch x := ref.(type) { - case reference.Canonical: - digest = x.Digest().String() - case reference.NamedTagged: - tag = x.Tag() - } - } - imageCtx := &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: digest, - } - err = ctx.contextFormat(tmpl, imageCtx) - if err != nil { - return - } - } - } - - ctx.postformat(tmpl, &imageContext{}) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go deleted file mode 100644 index 4c80fe1c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" -) - -func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var err error - receiveStdout := make(chan error, 1) - if outputStream != nil || errorStream != nil { - go func() { - // When TTY is ON, use regular copy - if tty && outputStream != nil { - _, err = io.Copy(outputStream, resp.Reader) - } else { - _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) - } - logrus.Debugf("[hijack] End of stdout") - receiveStdout <- err - }() - } - - stdinDone := make(chan struct{}) - go func() { - if inputStream != nil { - io.Copy(resp.Conn, inputStream) - logrus.Debugf("[hijack] End of stdin") - } - - if err := resp.CloseWrite(); err != nil { - logrus.Debugf("Couldn't send EOF: %s", err) - } - close(stdinDone) - }() - - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-stdinDone: - if outputStream != nil || errorStream != nil { - if err := <-receiveStdout; err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go deleted file mode 100644 index 5409b74a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/history.go +++ /dev/null @@ -1,74 +0,0 @@ -package client - -import ( - "fmt" - "strconv" - "strings" - "text/tabwriter" - "time" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-units" -) - -// CmdHistory shows the history of an image. 
-// -// Usage: docker history [OPTIONS] IMAGE -func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) - human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - history, err := cli.client.ImageHistory(cmd.Arg(0)) - if err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - - if *quiet { - for _, entry := range history { - if *noTrunc { - fmt.Fprintf(w, "%s\n", entry.ID) - } else { - fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) - } - } - w.Flush() - return nil - } - - var imageID string - var createdBy string - var created string - var size string - - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") - for _, entry := range history { - imageID = entry.ID - createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) - if *noTrunc == false { - createdBy = stringutils.Truncate(createdBy, 45) - imageID = stringid.TruncateID(entry.ID) - } - - if *human { - created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" - size = units.HumanSize(float64(entry.Size)) - } else { - created = time.Unix(entry.Created, 0).Format(time.RFC3339) - size = strconv.FormatInt(entry.Size, 10) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) - } - w.Flush() - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go deleted file mode 100644 index 2b0b4045..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/images.go +++ /dev/null @@ -1,79 +0,0 @@ -package client - -import ( - "github.com/docker/docker/api/client/formatter" - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. -// -// Usage: docker images [OPTIONS] [REPOSITORY] -func (cli *DockerCli) CmdImages(args ...string) error { - cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") - noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") - showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") - format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template") - - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - cmd.Require(flag.Max, 1) - - cmd.ParseFlags(args, true) - - // Consolidate all filter flags, and sanity check them early. - // They'll get process in the daemon/server. 
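What the consolidation amounts to, in isolation: every repeated -f/--filter flag is a key=value pair accumulated into a multimap that is later handed to the daemon. A minimal sketch assuming only the standard library; parseFilters stands in for the filters.NewArgs/filters.ParseFlag pair used here:

    package main

    import (
        "fmt"
        "strings"
    )

    func parseFilters(flags []string) (map[string][]string, error) {
        args := map[string][]string{}
        for _, f := range flags {
            // SplitN with limit 2 keeps values like "label=env=prod" intact.
            kv := strings.SplitN(f, "=", 2)
            if len(kv) != 2 {
                return nil, fmt.Errorf("bad filter %q, expected key=value", f)
            }
            args[kv[0]] = append(args[kv[0]], kv[1])
        }
        return args, nil
    }

    func main() {
        args, err := parseFilters([]string{"dangling=true", "label=env=prod"})
        if err != nil {
            panic(err)
        }
        fmt.Println(args) // map[dangling:[true] label:[env=prod]]
    }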
- imageFilterArgs := filters.NewArgs() - for _, f := range flFilter.GetAll() { - var err error - imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) - if err != nil { - return err - } - } - - var matchName string - if cmd.NArg() == 1 { - matchName = cmd.Arg(0) - } - - options := types.ImageListOptions{ - MatchName: matchName, - All: *all, - Filters: imageFilterArgs, - } - - images, err := cli.client.ImageList(options) - if err != nil { - return err - } - - f := *format - if len(f) == 0 { - if len(cli.ImagesFormat()) > 0 && !*quiet { - f = cli.ImagesFormat() - } else { - f = "table" - } - } - - imagesCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: cli.out, - Format: f, - Quiet: *quiet, - Trunc: !*noTrunc, - }, - Digest: *showDigests, - Images: images, - } - - imagesCtx.Write() - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go deleted file mode 100644 index 5b812206..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/import.go +++ /dev/null @@ -1,80 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. -// -// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. -// -// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] -func (cli *DockerCli) CmdImport(args ...string) error { - cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) - flChanges := opts.NewListOpts(nil) - cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") - message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var ( - in io.Reader - tag string - src = cmd.Arg(0) - srcName = src - repository = cmd.Arg(1) - changes = flChanges.GetAll() - ) - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use file|URL|- [REPOSITORY[:TAG]]\n") - tag = cmd.Arg(2) - } - - if repository != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(repository); err != nil { - return err - } - } - - if src == "-" { - in = cli.in - } else if !urlutil.IsURL(src) { - srcName = "-" - file, err := os.Open(src) - if err != nil { - return err - } - defer file.Close() - in = file - } - - options := types.ImageImportOptions{ - Source: in, - SourceName: srcName, - RepositoryName: repository, - Message: *message, - Tag: tag, - Changes: changes, - } - - responseBody, err := cli.client.ImageImport(options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go deleted file mode 100644 index 42f06826..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/info.go +++ /dev/null @@ -1,144 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-units" -) - -// CmdInfo displays system-wide information. -// -// Usage: docker info -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) - cmd.Require(flag.Exact, 0) - - cmd.ParseFlags(args, true) - - info, err := cli.client.Info() - if err != nil { - return err - } - - fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) - fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) - fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) - fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) - fmt.Fprintf(cli.out, "Images: %d\n", info.Images) - ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) - ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) - if info.DriverStatus != nil { - for _, pair := range info.DriverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - - // print a warning if devicemapper is using a loopback file - if pair[0] == "Data loop file" { - fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") - } - } - - } - ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) - ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) - - fmt.Fprintf(cli.out, "Plugins: \n") - fmt.Fprintf(cli.out, " Volume:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) - fmt.Fprintf(cli.out, "\n") - fmt.Fprintf(cli.out, " Network:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) - fmt.Fprintf(cli.out, "\n") - - if len(info.Plugins.Authorization) != 0 { - fmt.Fprintf(cli.out, " Authorization:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) - fmt.Fprintf(cli.out, "\n") - } - - ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) - ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) - ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) - ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) - fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) - fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) - ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) - ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) - - if info.Debug { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) - fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) - fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) - fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) - fmt.Fprintf(cli.out, " Init SHA1: %s\n", info.InitSha1) - fmt.Fprintf(cli.out, " Init Path: %s\n", info.InitPath) - fmt.Fprintf(cli.out, " Docker Root Dir: %s\n", info.DockerRootDir) - } - - ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) - ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) - ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) - - if info.IndexServerAddress != "" { - u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) - } - } - - // Only output these warnings if the server does not support these features - if info.OSType != "windows" { - if !info.MemoryLimit { - fmt.Fprintln(cli.err, "WARNING: No memory limit support") - } - if !info.SwapLimit { - fmt.Fprintln(cli.err, "WARNING: No swap limit support") - } - if !info.OomKillDisable { - fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") - } - if !info.CPUCfsQuota { - fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") - } - if !info.CPUCfsPeriod { - fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") - } - if !info.CPUShares { - fmt.Fprintln(cli.err, "WARNING: No cpu shares support") - } - if !info.CPUSet { - fmt.Fprintln(cli.err, "WARNING: No cpuset support") - } - if !info.IPv4Forwarding { - fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") - } - if !info.BridgeNfIptables { - fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is disabled") - } - if !info.BridgeNfIP6tables { - fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") - } - } - - if info.Labels != nil { - fmt.Fprintln(cli.out, "Labels:") - for _, attribute := range info.Labels { - fmt.Fprintf(cli.out, " %s\n", 
attribute) - } - } - - ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) - if info.ClusterStore != "" { - fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) - } - - if info.ClusterAdvertise != "" { - fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go deleted file mode 100644 index 5401d3bc..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect.go +++ /dev/null @@ -1,133 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "text/template" - - "github.com/docker/docker/api/client/inspect" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/client" -) - -var funcMap = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, -} - -// CmdInspect displays low-level information on one or more containers or images. -// -// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) - tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") - inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") - size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { - return fmt.Errorf("%q is not a valid value for --type", *inspectType) - } - - var elementSearcher inspectSearcher - switch *inspectType { - case "container": - elementSearcher = cli.inspectContainers(*size) - case "image": - elementSearcher = cli.inspectImages(*size) - default: - elementSearcher = cli.inspectAll(*size) - } - - return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) -} - -func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ContainerInspectWithRaw(ref, getSize) - } -} - -func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ImageInspectWithRaw(ref, getSize) - } -} - -func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { - return func(ref string) (interface{}, []byte, error) { - c, rawContainer, err := cli.client.ContainerInspectWithRaw(ref, getSize) - if err != nil { - // Search for image with that id if a container doesn't exist. 
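The container-then-image fallback below is easy to get subtly wrong: only a not-found error may trigger the second lookup. A self-contained sketch of the shape, with a sentinel error and errors.Is standing in for the typed IsErrContainerNotFound/IsErrImageNotFound checks, and plain funcs standing in for the client:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("not found")

    func inspectAny(ref string, container, image func(string) (string, error)) (string, error) {
        if v, err := container(ref); err == nil {
            return v, nil
        } else if !errors.Is(err, errNotFound) {
            return "", err // real failure: do not mask it with an image lookup
        }
        v, err := image(ref)
        if errors.Is(err, errNotFound) {
            return "", fmt.Errorf("no such image or container: %s", ref)
        }
        return v, err
    }

    func main() {
        container := func(string) (string, error) { return "", errNotFound }
        image := func(ref string) (string, error) { return "image:" + ref, nil }
        fmt.Println(inspectAny("busybox", container, image)) // image:busybox <nil>
    }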
- if client.IsErrContainerNotFound(err) { - i, rawImage, err := cli.client.ImageInspectWithRaw(ref, getSize) - if err != nil { - if client.IsErrImageNotFound(err) { - return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) - } - return nil, nil, err - } - return i, rawImage, err - } - return nil, nil, err - } - return c, rawContainer, err - } -} - -type inspectSearcher func(ref string) (interface{}, []byte, error) - -func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error { - elementInspector, err := cli.newInspectorWithTemplate(tmplStr) - if err != nil { - return Cli.StatusError{StatusCode: 64, Status: err.Error()} - } - - var inspectErr error - for _, ref := range references { - element, raw, err := searchByReference(ref) - if err != nil { - inspectErr = err - break - } - - if err := elementInspector.Inspect(element, raw); err != nil { - inspectErr = err - break - } - } - - if err := elementInspector.Flush(); err != nil { - cli.inspectErrorStatus(err) - } - - if status := cli.inspectErrorStatus(inspectErr); status != 0 { - return Cli.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) inspectErrorStatus(err error) (status int) { - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - } - return -} - -func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) { - elementInspector := inspect.NewIndentedInspector(cli.out) - if tmplStr != "" { - tmpl, err := template.New("").Funcs(funcMap).Parse(tmplStr) - if err != nil { - return nil, fmt.Errorf("Template parsing error: %s", err) - } - elementInspector = inspect.NewTemplateInspector(cli.out, tmpl) - } - return elementInspector, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go deleted file mode 100644 index a1d16d47..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector.go +++ /dev/null @@ -1,119 +0,0 @@ -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "text/template" -) - -// Inspector defines an interface to implement to process elements -type Inspector interface { - Inspect(typedElement interface{}, rawElement []byte) error - Flush() error -} - -// TemplateInspector uses a text template to inspect elements. -type TemplateInspector struct { - outputStream io.Writer - buffer *bytes.Buffer - tmpl *template.Template -} - -// NewTemplateInspector creates a new inspector with a template. -func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { - return &TemplateInspector{ - outputStream: outputStream, - buffer: new(bytes.Buffer), - tmpl: tmpl, - } -} - -// Inspect executes the inspect template. -// It decodes the raw element into a map if the initial execution fails. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { - buffer := new(bytes.Buffer) - if err := i.tmpl.Execute(buffer, typedElement); err != nil { - if rawElement == nil { - return fmt.Errorf("Template parsing error: %v", err) - } - return i.tryRawInspectFallback(rawElement, err) - } - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// Flush write the result of inspecting all elements into the output stream. 
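TemplateInspector's contract is buffer-then-flush: each element renders into an in-memory buffer, and the output stream sees nothing until Flush. The pattern in isolation, standard library only:

    package main

    import (
        "bytes"
        "fmt"
        "os"
        "text/template"
    )

    func main() {
        tmpl := template.Must(template.New("").Parse("{{.Name}}: {{.ID}}"))
        var buf bytes.Buffer
        for _, e := range []map[string]string{
            {"Name": "web", "ID": "abc123"},
            {"Name": "db", "ID": "def456"},
        } {
            if err := tmpl.Execute(&buf, e); err != nil {
                fmt.Fprintln(os.Stderr, "template error:", err)
                return
            }
            buf.WriteByte('\n')
        }
        buf.WriteTo(os.Stdout) // the Flush step: one write, after every element succeeded
    }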
-func (i *TemplateInspector) Flush() error { - if i.buffer.Len() == 0 { - _, err := io.WriteString(i.outputStream, "\n") - return err - } - _, err := io.Copy(i.outputStream, i.buffer) - return err -} - -// IndentedInspector uses a buffer to stop the indented representation of an element. -type IndentedInspector struct { - outputStream io.Writer - elements []interface{} - rawElements [][]byte -} - -// NewIndentedInspector generates a new IndentedInspector. -func NewIndentedInspector(outputStream io.Writer) Inspector { - return &IndentedInspector{ - outputStream: outputStream, - } -} - -// Inspect writes the raw element with an indented json format. -func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { - if rawElement != nil { - i.rawElements = append(i.rawElements, rawElement) - } else { - i.elements = append(i.elements, typedElement) - } - return nil -} - -// Flush write the result of inspecting all elements into the output stream. -func (i *IndentedInspector) Flush() error { - if len(i.elements) == 0 && len(i.rawElements) == 0 { - _, err := io.WriteString(i.outputStream, "[]\n") - return err - } - - var buffer io.Reader - if len(i.rawElements) > 0 { - bytesBuffer := new(bytes.Buffer) - bytesBuffer.WriteString("[") - for idx, r := range i.rawElements { - bytesBuffer.Write(r) - if idx < len(i.rawElements)-1 { - bytesBuffer.WriteString(",") - } - } - bytesBuffer.WriteString("]") - indented := new(bytes.Buffer) - if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { - return err - } - buffer = indented - } else { - b, err := json.MarshalIndent(i.elements, "", " ") - if err != nil { - return err - } - buffer = bytes.NewReader(b) - } - - if _, err := io.Copy(i.outputStream, buffer); err != nil { - return err - } - _, err := io.WriteString(i.outputStream, "\n") - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go deleted file mode 100644 index 39a0510c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go14.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !go1.5 - -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -// tryeRawInspectFallback executes the inspect template with a raw interface. -// This allows docker cli to parse inspect structs injected with Swarm fields. -// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface. -// It doesn't allow to modify this behavior either, sending messages to the output. -// We assume that the template is invalid when there is a , if the template was valid -// we'd get or "" values. In that case we fail with the original error raised executing the -// template with the typed input. 
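A note on the doc comment above: the sentinel it refers to appears to have been stripped in this copy. Under the default options, Go's template engine prints the literal string "<no value>" for a missing field rather than failing, and that literal is what the go1.4 fallback greps the rendered output for, so the later Contains check against "" presumably read "<no value>" in the original. Demonstration with the standard library:

    package main

    import (
        "bytes"
        "fmt"
        "strings"
        "text/template"
    )

    func main() {
        // {{.Missing}} does not error against a map; it renders the
        // "<no value>" sentinel instead.
        tmpl := template.Must(template.New("").Parse("{{.Missing}}"))
        var buf bytes.Buffer
        if err := tmpl.Execute(&buf, map[string]string{"Name": "web"}); err != nil {
            fmt.Println("execute error:", err)
            return
        }
        if strings.Contains(buf.String(), "<no value>") {
            fmt.Println("template referenced a missing field")
        }
    }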
-func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return fmt.Errorf("unable to read inspect data: %v", rawErr) - } - - if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil { - return fmt.Errorf("Template parsing error: %v", rawErr) - } - - if strings.Contains(buffer.String(), "") { - return fmt.Errorf("Template parsing error: %v", originalErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go deleted file mode 100644 index b098f415..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/inspect/inspector_go15.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.5 - -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" -) - -func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return fmt.Errorf("unable to read inspect data: %v", rawErr) - } - - tmplMissingKey := i.tmpl.Option("missingkey=error") - if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { - return fmt.Errorf("Template parsing error: %v", rawErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go deleted file mode 100644 index 5e8e5b07..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/kill.go +++ /dev/null @@ -1,33 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdKill kills one or more running container using SIGKILL or a specified signal. -// -// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdKill(args ...string) error { - cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true) - signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if err := cli.client.ContainerKill(name, *signal); err != nil { - errs = append(errs, fmt.Sprintf("Failed to kill container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go deleted file mode 100644 index 82cf34d8..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/load.go +++ /dev/null @@ -1,45 +0,0 @@ -package client - -import ( - "io" - "os" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdLoad loads an image from a tar archive. -// -// The tar archive is read from STDIN by default, or from a tar archive file. 
-// -// Usage: docker load [OPTIONS] -func (cli *DockerCli) CmdLoad(args ...string) error { - cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) - infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") - cmd.Require(flag.Exact, 0) - cmd.ParseFlags(args, true) - - var input io.Reader = cli.in - if *infile != "" { - file, err := os.Open(*infile) - if err != nil { - return err - } - defer file.Close() - input = file - } - - response, err := cli.client.ImageLoad(input) - if err != nil { - return err - } - defer response.Body.Close() - - if response.JSON { - return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) - } - - _, err = io.Copy(cli.out, response.Body) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go deleted file mode 100644 index a396450d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/login.go +++ /dev/null @@ -1,143 +0,0 @@ -package client - -import ( - "bufio" - "fmt" - "io" - "os" - "runtime" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" -) - -// CmdLogin logs in or registers a user to a Docker registry service. -// -// If no server is specified, the user will be logged into or registered to the registry's index server. -// -// Usage: docker login SERVER -func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) - cmd.Require(flag.Max, 1) - - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") - - cmd.ParseFlags(args, true) - - // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.in = os.Stdin - } - - serverAddress := registry.IndexServer - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - authconfig, ok := cli.configFile.AuthConfigs[serverAddress] - if !ok { - authconfig = types.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - username = strings.TrimSpace(username) - if username == "" { - username = authconfig.Username - } - } - // Assume that a different username means they may not want to use - // the password or email from the config file, so prompt them - if username != authconfig.Username { - if password == "" { - oldState, err := term.SaveState(cli.inFd) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.inFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.inFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } - } else { - // However, if they don't override the username use the - // password or email from the cmd line if specified. IOW, allow - // then to change/override them. And if not specified, just - // use what's in the config file - if password == "" { - password = authconfig.Password - } - if email == "" { - email = authconfig.Email - } - } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.AuthConfigs[serverAddress] = authconfig - - auth := cli.configFile.AuthConfigs[serverAddress] - response, err := cli.client.RegistryLogin(auth) - if err != nil { - if client.IsErrUnauthorized(err) { - delete(cli.configFile.AuthConfigs, serverAddress) - if err2 := cli.configFile.Save(); err2 != nil { - fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2) - } - } - return err - } - - if err := cli.configFile.Save(); err != nil { - return fmt.Errorf("Error saving config file: %v", err) - } - fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename()) - - if response.Status != "" { - fmt.Fprintf(cli.out, "%s\n", response.Status) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go deleted file mode 100644 index 3753cbbe..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/logout.go +++ /dev/null @@ -1,39 +0,0 @@ -package client - -import ( - "fmt" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/registry" -) - -// CmdLogout logs a user out from a Docker registry. -// -// If no server is specified, the user will be logged out from the registry's index server. 
-// -// Usage: docker logout [SERVER] -func (cli *DockerCli) CmdLogout(args ...string) error { - cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) - cmd.Require(flag.Max, 1) - - cmd.ParseFlags(args, true) - - serverAddress := registry.IndexServer - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { - fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) - return nil - } - - fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) - delete(cli.configFile.AuthConfigs, serverAddress) - if err := cli.configFile.Save(); err != nil { - return fmt.Errorf("Failed to save docker config: %v", err) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go deleted file mode 100644 index 92b75e05..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/logs.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -import ( - "fmt" - "io" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" -) - -var validDrivers = map[string]bool{ - "json-file": true, - "journald": true, -} - -// CmdLogs fetches the logs of a given container. -// -// docker logs [OPTIONS] CONTAINER -func (cli *DockerCli) CmdLogs(args ...string) error { - cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) - follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") - since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") - times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") - tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - name := cmd.Arg(0) - - c, err := cli.client.ContainerInspect(name) - if err != nil { - return err - } - - if !validDrivers[c.HostConfig.LogConfig.Type] { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) - } - - options := types.ContainerLogsOptions{ - ContainerID: name, - ShowStdout: true, - ShowStderr: true, - Since: *since, - Timestamps: *times, - Follow: *follow, - Tail: *tail, - } - responseBody, err := cli.client.ContainerLogs(options) - if err != nil { - return err - } - defer responseBody.Close() - - if c.Config.Tty { - _, err = io.Copy(cli.out, responseBody) - } else { - _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody) - } - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go deleted file mode 100644 index 56adabc0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/network.go +++ /dev/null @@ -1,378 +0,0 @@ -package client - -import ( - "fmt" - "net" - "strings" - "text/tabwriter" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/stringid" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - 
"github.com/docker/engine-api/types/network" -) - -// CmdNetwork is the parent subcommand for all network commands -// -// Usage: docker network [OPTIONS] -func (cli *DockerCli) CmdNetwork(args ...string) error { - cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false) - cmd.Require(flag.Min, 1) - err := cmd.ParseFlags(args, true) - cmd.Usage() - return err -} - -// CmdNetworkCreate creates a new network with a given name -// -// Usage: docker network create [OPTIONS] -func (cli *DockerCli) CmdNetworkCreate(args ...string) error { - cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false) - flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network") - flOpts := opts.NewMapOpts(nil, nil) - - flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver") - flIpamSubnet := opts.NewListOpts(nil) - flIpamIPRange := opts.NewListOpts(nil) - flIpamGateway := opts.NewListOpts(nil) - flIpamAux := opts.NewMapOpts(nil, nil) - flIpamOpt := opts.NewMapOpts(nil, nil) - - cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment") - cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range") - cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet") - cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") - cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") - cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options") - - flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network") - - cmd.Require(flag.Exact, 1) - err := cmd.ParseFlags(args, true) - if err != nil { - return err - } - - // Set the default driver to "" if the user didn't set the value. - // That way we can know whether it was user input or not. - driver := *flDriver - if !cmd.IsSet("-driver") && !cmd.IsSet("d") { - driver = "" - } - - ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) - if err != nil { - return err - } - - // Construct network create request body - nc := types.NetworkCreate{ - Name: cmd.Arg(0), - Driver: driver, - IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()}, - Options: flOpts.GetAll(), - CheckDuplicate: true, - Internal: *flInternal, - } - - resp, err := cli.client.NetworkCreate(nc) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "%s\n", resp.ID) - return nil -} - -// CmdNetworkRm deletes one or more networks -// -// Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...] 
-func (cli *DockerCli) CmdNetworkRm(args ...string) error { - cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false) - cmd.Require(flag.Min, 1) - if err := cmd.ParseFlags(args, true); err != nil { - return err - } - - status := 0 - for _, net := range cmd.Args() { - if err := cli.client.NetworkRemove(net); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - } - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - return nil -} - -// CmdNetworkConnect connects a container to a network -// -// Usage: docker network connect [OPTIONS] -func (cli *DockerCli) CmdNetworkConnect(args ...string) error { - cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) - flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address") - flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address") - flLinks := opts.NewListOpts(runconfigopts.ValidateLink) - cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") - flAliases := opts.NewListOpts(nil) - cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container") - cmd.Require(flag.Min, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return err - } - epConfig := &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: *flIPAddress, - IPv6Address: *flIPv6Address, - }, - Links: flLinks.GetAll(), - Aliases: flAliases.GetAll(), - } - return cli.client.NetworkConnect(cmd.Arg(0), cmd.Arg(1), epConfig) -} - -// CmdNetworkDisconnect disconnects a container from a network -// -// Usage: docker network disconnect -func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { - cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false) - force := cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network") - cmd.Require(flag.Exact, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return err - } - - return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1), *force) -} - -// CmdNetworkLs lists all the networks managed by docker daemon -// -// Usage: docker network ls [OPTIONS] -func (cli *DockerCli) CmdNetworkLs(args ...string) error { - cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") - - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - - cmd.Require(flag.Exact, 0) - err := cmd.ParseFlags(args, true) - if err != nil { - return err - } - - // Consolidate all filter flags, and sanity check them early. - // They'll get process after get response from server. 
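The listing printed further down is plain text/tabwriter, with the same writer parameters used throughout this client (minwidth 20, tabwidth 1, padding 3, space-padded). Standalone:

    package main

    import (
        "fmt"
        "os"
        "text/tabwriter"
    )

    func main() {
        w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
        fmt.Fprintln(w, "NETWORK ID\tNAME\tDRIVER")
        fmt.Fprintf(w, "%s\t%s\t%s\n", "9f6f8a66e145", "bridge", "bridge")
        fmt.Fprintf(w, "%s\t%s\t%s\n", "c1d0f3f4a27b", "none", "null")
        w.Flush() // nothing is aligned or written until Flush
    }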
- netFilterArgs := filters.NewArgs() - for _, f := range flFilter.GetAll() { - if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil { - return err - } - } - - options := types.NetworkListOptions{ - Filters: netFilterArgs, - } - - networkResources, err := cli.client.NetworkList(options) - if err != nil { - return err - } - - wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - - // unless quiet (-q) is specified, print field titles - if !*quiet { - fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") - } - - for _, networkResource := range networkResources { - ID := networkResource.ID - netName := networkResource.Name - if !*noTrunc { - ID = stringid.TruncateID(ID) - } - if *quiet { - fmt.Fprintln(wr, ID) - continue - } - driver := networkResource.Driver - fmt.Fprintf(wr, "%s\t%s\t%s\t", - ID, - netName, - driver) - fmt.Fprint(wr, "\n") - } - wr.Flush() - return nil -} - -// CmdNetworkInspect inspects the network object for more details -// -// Usage: docker network inspect [OPTIONS] [NETWORK...] -func (cli *DockerCli) CmdNetworkInspect(args ...string) error { - cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false) - tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") - cmd.Require(flag.Min, 1) - - if err := cmd.ParseFlags(args, true); err != nil { - return err - } - - inspectSearcher := func(name string) (interface{}, []byte, error) { - i, err := cli.client.NetworkInspect(name) - return i, nil, err - } - - return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) -} - -// Consolidates the ipam configuration as a group from different related configurations -// user can configure network with multiple non-overlapping subnets and hence it is -// possible to correlate the various related parameters and consolidate them. -// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into -// structured ipam data. 
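The containment test that consolidateIpam and subnetMatches below build on is just net.ParseCIDR plus IPNet.Contains; extracted into a runnable form:

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    func subnetContains(subnet, data string) (bool, error) {
        _, ipnet, err := net.ParseCIDR(subnet)
        if err != nil {
            return false, fmt.Errorf("invalid subnet %s: %v", subnet, err)
        }
        var ip net.IP
        if strings.Contains(data, "/") {
            ip, _, err = net.ParseCIDR(data) // a range is matched by its base address
            if err != nil {
                return false, fmt.Errorf("invalid cidr %s: %v", data, err)
            }
        } else {
            ip = net.ParseIP(data) // a gateway or aux-address is a bare IP
        }
        return ipnet.Contains(ip), nil
    }

    func main() {
        ok, _ := subnetContains("172.28.0.0/16", "172.28.5.1")
        fmt.Println(ok) // true
        ok, _ = subnetContains("172.28.0.0/16", "10.0.0.0/24")
        fmt.Println(ok) // false
    }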
-func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { - if len(subnets) < len(ranges) || len(subnets) < len(gateways) { - return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") - } - iData := map[string]*network.IPAMConfig{} - - // Populate non-overlapping subnets into consolidation map - for _, s := range subnets { - for k := range iData { - ok1, err := subnetMatches(s, k) - if err != nil { - return nil, err - } - ok2, err := subnetMatches(k, s) - if err != nil { - return nil, err - } - if ok1 || ok2 { - return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") - } - } - iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} - } - - // Validate and add valid ip ranges - for _, r := range ranges { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, r) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].IPRange != "" { - return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) - } - d := iData[s] - d.IPRange = r - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for range %s", r) - } - } - - // Validate and add valid gateways - for _, g := range gateways { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, g) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].Gateway != "" { - return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) - } - d := iData[s] - d.Gateway = g - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for gateway %s", g) - } - } - - // Validate and add aux-addresses - for key, aa := range auxaddrs { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, aa) - if err != nil { - return nil, err - } - if !ok { - continue - } - iData[s].AuxAddress[key] = aa - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) - } - } - - idl := []network.IPAMConfig{} - for _, v := range iData { - idl = append(idl, *v) - } - return idl, nil -} - -func subnetMatches(subnet, data string) (bool, error) { - var ( - ip net.IP - ) - - _, s, err := net.ParseCIDR(subnet) - if err != nil { - return false, fmt.Errorf("Invalid subnet %s : %v", s, err) - } - - if strings.Contains(data, "/") { - ip, _, err = net.ParseCIDR(data) - if err != nil { - return false, fmt.Errorf("Invalid cidr %s : %v", data, err) - } - } else { - ip = net.ParseIP(data) - } - - return s.Contains(ip), nil -} - -func networkUsage() string { - networkCommands := map[string]string{ - "create": "Create a network", - "connect": "Connect container to a network", - "disconnect": "Disconnect container from a network", - "inspect": "Display detailed network information", - "ls": "List all networks", - "rm": "Remove a network", - } - - help := "Commands:\n" - - for cmd, description := range networkCommands { - help += fmt.Sprintf(" %-25.25s%s\n", cmd, description) - } - - help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.") - return help -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go deleted file mode 100644 index dab4eb2e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/pause.go +++ 
/dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdPause pauses all processes within one or more containers. -// -// Usage: docker pause CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdPause(args ...string) error { - cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if err := cli.client.ContainerPause(name); err != nil { - errs = append(errs, fmt.Sprintf("Failed to pause container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go deleted file mode 100644 index 0dd3da45..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/port.go +++ /dev/null @@ -1,59 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-connections/nat" -) - -// CmdPort lists port mappings for a container. -// If a private port is specified, it also shows the public-facing port that is NATed to the private port. -// -// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] -func (cli *DockerCli) CmdPort(args ...string) error { - cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - c, err := cli.client.ContainerInspect(cmd.Arg(0)) - if err != nil { - return err - } - - if cmd.NArg() == 2 { - var ( - port = cmd.Arg(1) - proto = "tcp" - parts = strings.SplitN(port, "/", 2) - ) - - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - newP, err := nat.NewPort(proto, port) - if err != nil { - return err - } - if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) - } - return nil - } - return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) - } - - for from, frontends := range c.NetworkSettings.Ports { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go deleted file mode 100644 index c0589738..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/ps.go +++ /dev/null @@ -1,87 +0,0 @@ -package client - -import ( - "github.com/docker/docker/api/client/formatter" - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -// CmdPs outputs a list of Docker containers. 
-// -// Usage: docker ps [OPTIONS] -func (cli *DockerCli) CmdPs(args ...string) error { - var ( - err error - - psFilterArgs = filters.NewArgs() - - cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) - quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") - all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") - noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") - nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") - since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") - before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") - last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") - format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") - flFilter = opts.NewListOpts(nil) - ) - cmd.Require(flag.Exact, 0) - - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - - cmd.ParseFlags(args, true) - if *last == -1 && *nLatest { - *last = 1 - } - - // Consolidate all filter flags, and sanity check them. - // They'll get processed in the daemon/server. - for _, f := range flFilter.GetAll() { - if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { - return err - } - } - - options := types.ContainerListOptions{ - All: *all, - Limit: *last, - Since: *since, - Before: *before, - Size: *size, - Filter: psFilterArgs, - } - - containers, err := cli.client.ContainerList(options) - if err != nil { - return err - } - - f := *format - if len(f) == 0 { - if len(cli.PsFormat()) > 0 && !*quiet { - f = cli.PsFormat() - } else { - f = "table" - } - } - - psCtx := formatter.ContainerContext{ - Context: formatter.Context{ - Output: cli.out, - Format: f, - Quiet: *quiet, - Trunc: !*noTrunc, - }, - Size: *size, - Containers: containers, - } - - psCtx.Write() - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go deleted file mode 100644 index 19220b96..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/pull.go +++ /dev/null @@ -1,87 +0,0 @@ -package client - -import ( - "errors" - "fmt" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" -) - -// CmdPull pulls an image or a repository from the registry. 
-// -// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] -func (cli *DockerCli) CmdPull(args ...string) error { - cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) - allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") - addTrustedFlags(cmd, true) - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - remote := cmd.Arg(0) - - distributionRef, err := reference.ParseNamed(remote) - if err != nil { - return err - } - if *allTags && !reference.IsNameOnly(distributionRef) { - return errors.New("tag can't be used with --all-tags/-a") - } - - if !*allTags && reference.IsNameOnly(distributionRef) { - distributionRef = reference.WithDefaultTag(distributionRef) - fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) - } - - var tag string - switch x := distributionRef.(type) { - case reference.Canonical: - tag = x.Digest().String() - case reference.NamedTagged: - tag = x.Tag() - } - - ref := registry.ParseReference(tag) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - if err != nil { - return err - } - - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) - requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") - - if isTrusted() && !ref.HasDigest() { - // Check if tag is digest - return cli.trustedPull(repoInfo, ref, authConfig, requestPrivilege) - } - - return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) -} - -func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { - - encodedAuth, err := encodeAuthToBase64(authConfig) - if err != nil { - return err - } - options := types.ImagePullOptions{ - ImageID: imageID, - Tag: tag, - RegistryAuth: encodedAuth, - } - - responseBody, err := cli.client.ImagePull(options, requestPrivilege) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go deleted file mode 100644 index 9e4972c3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/push.go +++ /dev/null @@ -1,74 +0,0 @@ -package client - -import ( - "errors" - "io" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" -) - -// CmdPush pushes an image or repository to the registry. 
-// -// Usage: docker push NAME[:TAG] -func (cli *DockerCli) CmdPush(args ...string) error { - cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) - addTrustedFlags(cmd, false) - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - ref, err := reference.ParseNamed(cmd.Arg(0)) - if err != nil { - return err - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - // Resolve the Auth config relevant for this server - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) - - requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") - if isTrusted() { - return cli.trustedPush(repoInfo, tag, authConfig, requestPrivilege) - } - - responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) -} - -func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := encodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - ImageID: imageID, - Tag: tag, - RegistryAuth: encodedAuth, - } - - return cli.client.ImagePush(options, requestPrivilege) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go deleted file mode 100644 index a67d5d02..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/rename.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdRename renames a container. -// -// Usage: docker rename OLD_NAME NEW_NAME -func (cli *DockerCli) CmdRename(args ...string) error { - cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) - cmd.Require(flag.Exact, 2) - - cmd.ParseFlags(args, true) - - oldName := strings.TrimSpace(cmd.Arg(0)) - newName := strings.TrimSpace(cmd.Arg(1)) - - if oldName == "" || newName == "" { - return fmt.Errorf("Error: Neither old nor new names may be empty") - } - - if err := cli.client.ContainerRename(oldName, newName); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - return fmt.Errorf("Error: failed to rename container named %s", oldName) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go deleted file mode 100644 index 245aca54..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/restart.go +++ /dev/null @@ -1,33 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdRestart restarts one or more containers. -// -// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
-func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if err := cli.client.ContainerRestart(name, *nSeconds); err != nil { - errs = append(errs, fmt.Sprintf("Failed to restart container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go deleted file mode 100644 index a6c4d9d5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/rm.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -// CmdRm removes one or more containers. -// -// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdRm(args ...string) error { - cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) - v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") - link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") - force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if name == "" { - return fmt.Errorf("Container name cannot be empty") - } - name = strings.Trim(name, "/") - - options := types.ContainerRemoveOptions{ - ContainerID: name, - RemoveVolumes: *v, - RemoveLinks: *link, - Force: *force, - } - - if err := cli.client.ContainerRemove(options); err != nil { - errs = append(errs, fmt.Sprintf("Failed to remove container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go deleted file mode 100644 index a190ab38..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/rmi.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -// CmdRmi removes all images with the specified name(s). -// -// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
-func (cli *DockerCli) CmdRmi(args ...string) error { - cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) - force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") - noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - options := types.ImageRemoveOptions{ - ImageID: name, - Force: *force, - PruneChildren: !*noprune, - } - - dels, err := cli.client.ImageRemove(options) - if err != nil { - errs = append(errs, fmt.Sprintf("Failed to remove image (%s): %s", name, err)) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) - } - } - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go deleted file mode 100644 index 16f4230e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/run.go +++ /dev/null @@ -1,290 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" - Cli "github.com/docker/docker/cli" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/libnetwork/resolvconf/dns" -) - -func (cid *cidFile) Close() error { - cid.file.Close() - - if !cid.written { - if err := os.Remove(cid.path); err != nil { - return fmt.Errorf("failed to remove the CID file '%s': %s\n", cid.path, err) - } - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -// if container start fails with 'command not found' error, return 127 -// if container start fails with 'command cannot be invoked' error, return 126 -// return 125 for generic docker daemon failures -func runStartContainerErr(err error) error { - trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") - statusError := Cli.StatusError{} - derrCmdNotFound := derr.ErrorCodeCmdNotFound.Message() - derrCouldNotInvoke := derr.ErrorCodeCmdCouldNotBeInvoked.Message() - derrNoSuchImage := derr.ErrorCodeNoSuchImageHash.Message() - derrNoSuchImageTag := derr.ErrorCodeNoSuchImageTag.Message() - switch trimmedErr { - case derrCmdNotFound: - statusError = Cli.StatusError{StatusCode: 127} - case derrCouldNotInvoke: - statusError = Cli.StatusError{StatusCode: 126} - case derrNoSuchImage, derrNoSuchImageTag: - statusError = Cli.StatusError{StatusCode: 125} - default: - statusError = Cli.StatusError{StatusCode: 125} - } - return statusError -} - -// CmdRun runs a command in a new container. 
-func (cli *DockerCli) CmdRun(args ...string) error { - cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) - addTrustedFlags(cmd, true) - - // These are flags not stored in Config/HostConfig - var ( - flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") - flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") - flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") - flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") - flAttach *opts.ListOpts - - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") - ) - - config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) - - // just in case the Parse does not exit - if err != nil { - cmd.ReportError(err.Error(), true) - os.Exit(125) - } - - if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { - fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") - } - - if len(hostConfig.DNS) > 0 { - // check the DNS settings passed via --dns against - // localhost regexp to warn if they are trying to - // set a DNS to a localhost address - for _, dnsIP := range hostConfig.DNS { - if dns.IsLocalhost(dnsIP) { - fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - break - } - } - } - if config.Image == "" { - cmd.Usage() - return nil - } - - config.ArgsEscaped = false - - if !*flDetach { - if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if fl := cmd.Lookup("-attach"); fl != nil { - flAttach = fl.Value.(*opts.ListOpts) - if flAttach.Len() != 0 { - return ErrConflictAttachDetach - } - } - if *flAutoRemove { - return ErrConflictDetachAutoRemove - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - } - - // Disable flSigProxy when in TTY mode - sigProxy := *flSigProxy - if config.Tty { - sigProxy = false - } - - // Telling the Windows daemon the initial size of the tty during start makes - // a far better user experience rather than relying on subsequent resizes - // to cause things to catch up. 
- if runtime.GOOS == "windows" { - hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() - } - - createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) - if err != nil { - cmd.ReportError(err.Error(), true) - return runStartContainerErr(err) - } - if sigProxy { - sigc := cli.forwardAllSignals(createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayID chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayID = make(chan struct{}) - go func() { - defer close(waitDisplayID) - fmt.Fprintf(cli.out, "%s\n", createResponse.ID) - }() - } - if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { - return ErrConflictRestartPolicyAndAutoRemove - } - - if config.AttachStdin || config.AttachStdout || config.AttachStderr { - var ( - out, stderr io.Writer - in io.ReadCloser - ) - if config.AttachStdin { - in = cli.in - } - if config.AttachStdout { - out = cli.out - } - if config.AttachStderr { - if config.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - if *flDetachKeys != "" { - cli.configFile.DetachKeys = *flDetachKeys - } - - options := types.ContainerAttachOptions{ - ContainerID: createResponse.ID, - Stream: true, - Stdin: config.AttachStdin, - Stdout: config.AttachStdout, - Stderr: config.AttachStderr, - DetachKeys: cli.configFile.DetachKeys, - } - - resp, err := cli.client.ContainerAttach(options) - if err != nil { - return err - } - if in != nil && config.Tty { - if err := cli.setRawTerminal(); err != nil { - return err - } - defer cli.restoreTerminal(in) - } - errCh = promise.Go(func() error { - return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) - }) - } - - defer func() { - if *flAutoRemove { - options := types.ContainerRemoveOptions{ - ContainerID: createResponse.ID, - RemoveVolumes: true, - } - if err := cli.client.ContainerRemove(options); err != nil { - fmt.Fprintf(cli.err, "Error deleting container: %s\n", err) - } - } - }() - - //start the container - if err := cli.client.ContainerStart(createResponse.ID); err != nil { - cmd.ReportError(err.Error(), false) - return runStartContainerErr(err) - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { - if err := cli.monitorTtySize(createResponse.ID, false); err != nil { - fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. 
- if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayID - return nil - } - - var status int - - // Attached mode - if *flAutoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { - return runStartContainerErr(err) - } - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { - return err - } - } else { - // No Autoremove: Simply retrieve the exit code - if !config.Tty { - // In non-TTY mode, we can't detach, so we must wait for container exit - if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { - return err - } - } else { - // In TTY mode, there is a race: if the process dies too slowly, the state could - // be updated after the getExitCode call and result in the wrong exit code being reported - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { - return err - } - } - } - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go deleted file mode 100644 index e7e0166d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/save.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "errors" - "io" - "os" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdSave saves one or more images to a tar archive. -// -// The tar archive is written to STDOUT by default, or written to a file. -// -// Usage: docker save [OPTIONS] IMAGE [IMAGE...] -func (cli *DockerCli) CmdSave(args ...string) error { - cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) - outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var ( - output = cli.out - err error - ) - - if *outfile == "" && cli.isTerminalOut { - return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") - } - if *outfile != "" { - if output, err = os.Create(*outfile); err != nil { - return err - } - } - - responseBody, err := cli.client.ImageSave(cmd.Args()) - if err != nil { - return err - } - defer responseBody.Close() - - _, err = io.Copy(output, responseBody) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go deleted file mode 100644 index 64bbb67e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/search.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -import ( - "fmt" - "net/url" - "sort" - "strings" - "text/tabwriter" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// CmdSearch searches the Docker Hub for images. 
-// -// Usage: docker search [OPTIONS] TERM -func (cli *DockerCli) CmdSearch(args ...string) error { - cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) - noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") - automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") - stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") - cmd.Require(flag.Exact, 1) - - cmd.ParseFlags(args, true) - - name := cmd.Arg(0) - v := url.Values{} - v.Set("term", name) - - indexInfo, err := registry.ParseSearchIndexInfo(name) - if err != nil { - return err - } - - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, indexInfo) - requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") - - encodedAuth, err := encodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageSearchOptions{ - Term: name, - RegistryAuth: encodedAuth, - } - - unorderedResults, err := cli.client.ImageSearch(options, requestPrivilege) - if err != nil { - return err - } - - results := searchResultsByStars(unorderedResults) - sort.Sort(results) - - w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, res := range results { - if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { - continue - } - desc := strings.Replace(res.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !*noTrunc && len(desc) > 45 { - desc = stringutils.Truncate(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) - if res.IsOfficial { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if res.IsAutomated || res.IsTrusted { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// SearchResultsByStars sorts search results in descending order by number of stars. -type searchResultsByStars []registrytypes.SearchResult - -func (r searchResultsByStars) Len() int { return len(r) } -func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go deleted file mode 100644 index 0d44217d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/start.go +++ /dev/null @@ -1,155 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/Sirupsen/logrus" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" -) - -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 128) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == signal.SIGCHLD { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) - continue - } - - if err := cli.client.ContainerKill(cid, sig); err != nil { - logrus.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -// CmdStart starts one or more containers. -// -// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] 
-func (cli *DockerCli) CmdStart(args ...string) error { - cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) - attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") - openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") - detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - if *attach || *openStdin { - // We're going to attach to a container. - // 1. Ensure we only have one container. - if cmd.NArg() > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - // 2. Attach to the container. - containerID := cmd.Arg(0) - c, err := cli.client.ContainerInspect(containerID) - if err != nil { - return err - } - - if !c.Config.Tty { - sigc := cli.forwardAllSignals(containerID) - defer signal.StopCatch(sigc) - } - - if *detachKeys != "" { - cli.configFile.DetachKeys = *detachKeys - } - - options := types.ContainerAttachOptions{ - ContainerID: containerID, - Stream: true, - Stdin: *openStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: cli.configFile.DetachKeys, - } - - var in io.ReadCloser - if options.Stdin { - in = cli.in - } - - resp, err := cli.client.ContainerAttach(options) - if err != nil { - return err - } - defer resp.Close() - if in != nil && c.Config.Tty { - if err := cli.setRawTerminal(); err != nil { - return err - } - defer cli.restoreTerminal(in) - } - - cErr := promise.Go(func() error { - return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) - }) - - // 3. Start the container. - if err := cli.client.ContainerStart(containerID); err != nil { - return err - } - - // 4. Wait for attachment to break. - if c.Config.Tty && cli.isTerminalOut { - if err := cli.monitorTtySize(containerID, false); err != nil { - fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) - } - } - if attachErr := <-cErr; attachErr != nil { - return attachErr - } - _, status, err := getExitCode(cli, containerID) - if err != nil { - return err - } - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - } else { - // We're not going to attach to anything. - // Start as many containers as we want. 
- return cli.startContainersWithoutAttachments(cmd.Args()) - } - - return nil -} - -func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { - var failedContainers []string - for _, containerID := range containerIDs { - if err := cli.client.ContainerStart(containerID); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - failedContainers = append(failedContainers, containerID) - } else { - fmt.Fprintf(cli.out, "%s\n", containerID) - } - } - - if len(failedContainers) > 0 { - return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go deleted file mode 100644 index 46f4c04b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/stats.go +++ /dev/null @@ -1,344 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "io" - "sort" - "strings" - "sync" - "text/tabwriter" - "time" - - Cli "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/go-units" -) - -type containerStats struct { - Name string - CPUPercentage float64 - Memory float64 - MemoryLimit float64 - MemoryPercentage float64 - NetworkRx float64 - NetworkTx float64 - BlockRead float64 - BlockWrite float64 - mu sync.RWMutex - err error -} - -type stats struct { - mu sync.Mutex - cs []*containerStats -} - -func (s *containerStats) Collect(cli *DockerCli, streamStats bool) { - responseBody, err := cli.client.ContainerStats(s.Name, streamStats) - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - return - } - defer responseBody.Close() - - var ( - previousCPU uint64 - previousSystem uint64 - dec = json.NewDecoder(responseBody) - u = make(chan error, 1) - ) - go func() { - for { - var v *types.StatsJSON - if err := dec.Decode(&v); err != nil { - u <- err - return - } - - var memPercent = 0.0 - var cpuPercent = 0.0 - - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if v.MemoryStats.Limit != 0 { - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - } - - previousCPU = v.PreCPUStats.CPUUsage.TotalUsage - previousSystem = v.PreCPUStats.SystemUsage - cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) - blkRead, blkWrite := calculateBlockIO(v.BlkioStats) - s.mu.Lock() - s.CPUPercentage = cpuPercent - s.Memory = float64(v.MemoryStats.Usage) - s.MemoryLimit = float64(v.MemoryStats.Limit) - s.MemoryPercentage = memPercent - s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) - s.BlockRead = float64(blkRead) - s.BlockWrite = float64(blkWrite) - s.mu.Unlock() - u <- nil - if !streamStats { - return - } - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. 
- s.mu.Lock() - s.CPUPercentage = 0 - s.Memory = 0 - s.MemoryPercentage = 0 - s.MemoryLimit = 0 - s.NetworkRx = 0 - s.NetworkTx = 0 - s.BlockRead = 0 - s.BlockWrite = 0 - s.mu.Unlock() - case err := <-u: - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - return - } - } - if !streamStats { - return - } - } -} - -func (s *containerStats) Display(w io.Writer) error { - s.mu.RLock() - defer s.mu.RUnlock() - if s.err != nil { - return s.err - } - fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n", - s.Name, - s.CPUPercentage, - units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), - s.MemoryPercentage, - units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), - units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite)) - return nil -} - -// CmdStats displays a live stream of resource usage statistics for one or more containers. -// -// This shows real-time information on CPU usage, memory usage, and network I/O. -// -// Usage: docker stats [OPTIONS] [CONTAINER...] -func (cli *DockerCli) CmdStats(args ...string) error { - cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) - all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") - noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") - - cmd.ParseFlags(args, true) - - names := cmd.Args() - showAll := len(names) == 0 - - if showAll { - options := types.ContainerListOptions{ - All: *all, - } - cs, err := cli.client.ContainerList(options) - if err != nil { - return err - } - for _, c := range cs { - names = append(names, c.ID[:12]) - } - } - if len(names) == 0 && !showAll { - return fmt.Errorf("No containers found") - } - sort.Strings(names) - - var ( - cStats = stats{} - w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - ) - printHeader := func() { - if !*noStream { - fmt.Fprint(cli.out, "\033[2J") - fmt.Fprint(cli.out, "\033[H") - } - io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n") - } - for _, n := range names { - s := &containerStats{Name: n} - // no need to lock here since only the main goroutine is running here - cStats.cs = append(cStats.cs, s) - go s.Collect(cli, !*noStream) - } - closeChan := make(chan error) - if showAll { - type watch struct { - cid string - event string - err error - } - getNewContainers := func(c chan<- watch) { - f := filters.NewArgs() - f.Add("type", "container") - options := types.EventsOptions{ - Filters: f, - } - resBody, err := cli.client.Events(options) - if err != nil { - c <- watch{err: err} - return - } - defer resBody.Close() - - decodeEvents(resBody, func(event events.Message, err error) error { - if err != nil { - c <- watch{err: err} - return nil - } - - c <- watch{event.ID[:12], event.Action, nil} - return nil - }) - } - go func(stopChan chan<- error) { - cChan := make(chan watch) - go getNewContainers(cChan) - for { - c := <-cChan - if c.err != nil { - stopChan <- c.err - return - } - switch c.event { - case "create": - s := &containerStats{Name: c.cid} - cStats.mu.Lock() - cStats.cs = append(cStats.cs, s) - cStats.mu.Unlock() - go s.Collect(cli, !*noStream) - case "stop": - case "die": - if !*all { - var remove int - // cStats cannot be O(1) with a map cause ranging over it would cause - // containers in stats to move up and down in the list...:( - cStats.mu.Lock() - for i, s := range cStats.cs { - if s.Name == c.cid { - remove = i - break - } - } - cStats.cs = 
append(cStats.cs[:remove], cStats.cs[remove+1:]...) - cStats.mu.Unlock() - } - } - } - }(closeChan) - } else { - close(closeChan) - } - // do a quick pause so that any failed connections for containers that do not exist can be - // evicted before we display the initial or default values. - time.Sleep(1500 * time.Millisecond) - var errs []string - cStats.mu.Lock() - for _, c := range cStats.cs { - c.mu.Lock() - if c.err != nil { - errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) - } - c.mu.Unlock() - } - cStats.mu.Unlock() - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ", ")) - } - for range time.Tick(500 * time.Millisecond) { - printHeader() - toRemove := []int{} - cStats.mu.Lock() - for i, s := range cStats.cs { - if err := s.Display(w); err != nil && !*noStream { - toRemove = append(toRemove, i) - } - } - for j := len(toRemove) - 1; j >= 0; j-- { - i := toRemove[j] - cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) - } - if len(cStats.cs) == 0 && !showAll { - cStats.mu.Unlock() - return nil - } - cStats.mu.Unlock() - w.Flush() - if *noStream { - break - } - select { - case err, ok := <-closeChan: - if ok { - if err != nil { - // this is suppressing "unexpected EOF" in the cli when the - // daemon restarts so it shuts down cleanly - if err == io.ErrUnexpectedEOF { - return nil - } - return err - } - } - default: - // just skip - } - } - return nil -} - -func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) - // calculate the change for the entire system between readings - systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) - ) - - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 - } - return cpuPercent -} - -func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { - for _, bioEntry := range blkio.IoServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - return -} - -func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { - var rx, tx float64 - - for _, v := range network { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - return rx, tx -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go deleted file mode 100644 index 9d429ea0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/stop.go +++ /dev/null @@ -1,35 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdStop stops one or more containers. -- -// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). -// -// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
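The figure produced by calculateCPUPercent above is the container's share of the total system CPU delta between two readings, scaled by the number of per-CPU counters, so a fully busy 4-CPU machine reports up to 400%. A minimal standalone sketch of the same arithmetic with illustrative numbers (the sample readings and the helper name are assumptions, for demonstration only):

package main

import "fmt"

// cpuPercent mirrors the formula in calculateCPUPercent above:
// (container CPU delta / system CPU delta) * number of CPUs * 100.
// Inputs are cumulative nanosecond counters, as in the stats API.
func cpuPercent(prevCPU, curCPU, prevSys, curSys uint64, ncpus int) float64 {
	cpuDelta := float64(curCPU) - float64(prevCPU)
	systemDelta := float64(curSys) - float64(prevSys)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(ncpus) * 100.0
	}
	return 0.0
}

func main() {
	// The container used 200ms of CPU while the whole 4-CPU system
	// accumulated 4s: (0.2 / 4) * 4 * 100 = 20.00%.
	fmt.Printf("%.2f%%\n", cpuPercent(0, 200000000, 0, 4000000000, 4))
}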
-func (cli *DockerCli) CmdStop(args ...string) error { - cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if err := cli.client.ContainerStop(name, *nSeconds); err != nil { - errs = append(errs, fmt.Sprintf("Failed to stop container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go deleted file mode 100644 index 5a824714..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/tag.go +++ /dev/null @@ -1,44 +0,0 @@ -package client - -import ( - "errors" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// CmdTag tags an image into a repository. -// -// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] -func (cli *DockerCli) CmdTag(args ...string) error { - cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) - force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") - cmd.Require(flag.Exact, 2) - - cmd.ParseFlags(args, true) - - ref, err := reference.ParseNamed(cmd.Arg(1)) - if err != nil { - return err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } - - options := types.ImageTagOptions{ - ImageID: cmd.Arg(0), - RepositoryName: ref.Name(), - Tag: tag, - Force: *force, - } - - return cli.client.ImageTag(options) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go deleted file mode 100644 index 90e2d7d3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/top.go +++ /dev/null @@ -1,39 +0,0 @@ -package client - -import ( - "fmt" - "strings" - "text/tabwriter" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdTop displays the running processes of a container. 
-// -// Usage: docker top CONTAINER -func (cli *DockerCli) CmdTop(args ...string) error { - cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var arguments []string - if cmd.NArg() > 1 { - arguments = cmd.Args()[1:] - } - - procList, err := cli.client.ContainerTop(cmd.Arg(0), arguments) - if err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) - - for _, proc := range procList.Processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go deleted file mode 100644 index d06db5d9..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/trust.go +++ /dev/null @@ -1,452 +0,0 @@ -package client - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - apiclient "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary/client" - "github.com/docker/notary/passphrase" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" -) - -var ( - releasesRole = path.Join(data.CanonicalTargetsRole, "releases") - untrusted bool -) - -func addTrustedFlags(fs *flag.FlagSet, verify bool) { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - message := "Skip image signing" - if verify { - message = "Skip image verification" - } - fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) -} - -func isTrusted() bool { - return !untrusted -} - -type target struct { - reference registry.Reference - digest digest.Digest - size int64 -} - -func (cli *DockerCli) trustDirectory() string { - return filepath.Join(cliconfig.ConfigDir(), "trust") -} - -// certificateDirectory returns the directory containing -// TLS certificates for the given server. An error is -// returned if there was an error parsing the server string. 
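One subtlety in addTrustedFlags above: any non-empty DOCKER_CONTENT_TRUST value that either parses as true or fails to parse at all enables content trust; only an explicitly false value ("false", "0", and so on) disables it. A small standalone sketch of that decision table (the helper name is an assumption):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// contentTrustEnabled mirrors the DOCKER_CONTENT_TRUST handling in
// addTrustedFlags above: unset or empty means disabled, and any value
// that is true or unparseable counts as enabled.
func contentTrustEnabled() bool {
	e := os.Getenv("DOCKER_CONTENT_TRUST")
	if e == "" {
		return false
	}
	t, err := strconv.ParseBool(e)
	return t || err != nil
}

func main() {
	for _, v := range []string{"", "1", "true", "yes", "false", "0"} {
		os.Setenv("DOCKER_CONTENT_TRUST", v)
		fmt.Printf("DOCKER_CONTENT_TRUST=%q -> enabled=%v\n", v, contentTrustEnabled())
	}
}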
-func (cli *DockerCli) certificateDirectory(server string) (string, error) { - u, err := url.Parse(server) - if err != nil { - return "", err - } - - return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil -} - -func trustServer(index *registrytypes.IndexInfo) (string, error) { - if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - urlObj, err := url.Parse(s) - if err != nil || urlObj.Scheme != "https" { - return "", fmt.Errorf("valid https URL required for trust server, got %s", s) - } - - return s, nil - } - if index.Official { - return registry.NotaryServer, nil - } - return "https://" + index.Name, nil -} - -type simpleCredentialStore struct { - auth types.AuthConfig -} - -func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { - return scs.auth.Username, scs.auth.Password -} - -func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig) (*client.NotaryRepository, error) { - server, err := trustServer(repoInfo.Index) - if err != nil { - return nil, err - } - - var cfg = tlsconfig.ClientDefault - cfg.InsecureSkipVerify = !repoInfo.Index.Secure - - // Get certificate base directory - certDir, err := cli.certificateDirectory(server) - if err != nil { - return nil, err - } - logrus.Debugf("reading certificate directory: %s", certDir) - - if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { - return nil, err - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &cfg, - DisableKeepAlives: true, - } - - // Skip configuration headers since request is not going to Docker daemon - modifiers := registry.DockerHeaders(http.Header{}) - authTransport := transport.NewTransport(base, modifiers...) - pingClient := &http.Client{ - Transport: authTransport, - Timeout: 5 * time.Second, - } - endpointStr := server + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - - challengeManager := auth.NewSimpleChallengeManager() - - resp, err := pingClient.Do(req) - if err != nil { - // Ignore error on ping to operate in offline mode - logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) - } else { - defer resp.Body.Close() - - // Add response to the challenge manager to parse out - // authentication header and register authentication method - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - } - - creds := simpleCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), "push", "pull") - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) - tr := transport.NewTransport(base, modifiers...) 
- - return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.FullName(), server, tr, cli.getPassphraseRetriever()) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - reference: registry.ParseReference(t.Name), - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { - aliasMap := map[string]string{ - "root": "root", - "snapshot": "repository", - "targets": "repository", - "targets/releases": "repository", - } - baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) - env := map[string]string{ - "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets/releases": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - } - - // Backwards compatibility with old env names. We should remove this in 1.10 - if env["root"] == "" { - if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"); passphrase != "" { - env["root"] = passphrase - fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n") - } - } - if env["snapshot"] == "" || env["targets"] == "" || env["targets/releases"] == "" { - if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" { - env["snapshot"] = passphrase - env["targets"] = passphrase - env["targets/releases"] = passphrase - fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. 
Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n") - } - } - - return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { - if v := env[alias]; v != "" { - return v, numAttempts > 1, nil - } - return baseRetriever(keyName, alias, createNew, numAttempts) - } -} - -func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) { - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return nil, err - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, err - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - - return reference.WithDigest(ref, r.digest) -} - -func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error { - fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) - - options := types.ImageTagOptions{ - ImageID: trustedRef.String(), - RepositoryName: trustedRef.Name(), - Tag: ref.Tag(), - Force: true, - } - - return cli.client.ImageTag(options) -} - -func notaryError(repoName string, err error) error { - switch err.(type) { - case *json.SyntaxError: - logrus.Debugf("Notary syntax error: %s", err) - return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) - case signed.ErrExpired: - return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) - case trustmanager.ErrKeyNotFound: - return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) - case *net.OpError: - return fmt.Errorf("Error: error contacting notary server: %v", err) - case store.ErrMetaNotFound: - return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) - case signed.ErrInvalidKeyType: - return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) - case signed.ErrNoKeys: - return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) - case signed.ErrLowVersion: - return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) - case signed.ErrRoleThreshold: - return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) - case client.ErrRepositoryNotExist: - return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) - case signed.ErrInsufficientSignatures: - return fmt.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) - } - - return err -} - -func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { - var refs []target - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return err - } - - if ref.String() == "" { - // List all targets - targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name()) - continue - } - refs = append(refs, t) - } - } else { - t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - r, err := convertTarget(t.Target) - if err != nil { - return err - - } - refs = append(refs, r) - } - - for i, r := range refs { - displayTag := r.reference.String() - if displayTag != "" { - displayTag = ":" + displayTag - } - fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) - - if err := cli.imagePullPrivileged(authConfig, repoInfo.Name(), r.digest.String(), requestPrivilege); err != nil { - return err - } - - // If reference is not trusted, tag by trusted reference - if !r.reference.HasDigest() { - tagged, err := reference.WithTag(repoInfo, r.reference.String()) - if err != nil { - return err - } - trustedRef, err := reference.WithDigest(repoInfo, r.digest) - if err != nil { - return err - } - if err := cli.tagTrusted(trustedRef, tagged); err != nil { - return err - } - } - } - return nil -} - -func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { - responseBody, err := cli.imagePushPrivileged(authConfig, repoInfo.Name(), tag, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - targets := []target{} - handleTarget := func(aux *json.RawMessage) { - var pushResult distribution.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { - targets = append(targets, target{ - reference: registry.ParseReference(pushResult.Tag), - digest: pushResult.Digest, - size: int64(pushResult.Size), - }) - } - } - - err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, handleTarget) - if err != nil { - return err - } - - if tag == "" { - fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n") - return nil - } - if len(targets) == 0 { - fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n") - return nil - } - - fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n") - - repo, err := cli.getNotaryRepository(repoInfo, authConfig) - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) - return err - } - - for _, target := range targets { - h, err := hex.DecodeString(target.digest.Hex()) - if err != nil { - return err - } - t := &client.Target{ - Name: target.reference.String(), - Hashes: data.Hashes{ - string(target.digest.Algorithm()): h, - }, - Length: 
int64(target.size), - } - if err := repo.AddTarget(t, releasesRole); err != nil { - return err - } - } - - err = repo.Publish() - if _, ok := err.(client.ErrRepoNotInitialized); !ok { - return notaryError(repoInfo.FullName(), err) - } - - keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) - - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - if err := repo.Initialize(rootKeyID); err != nil { - return notaryError(repoInfo.FullName(), err) - } - fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName()) - - return notaryError(repoInfo.FullName(), repo.Publish()) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go deleted file mode 100644 index 9ec3310d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/unpause.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdUnpause unpauses all processes within a container, for one or more containers. -// -// Usage: docker unpause CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdUnpause(args ...string) error { - cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - if err := cli.client.ContainerUnpause(name); err != nil { - errs = append(errs, fmt.Sprintf("Failed to unpause container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go deleted file mode 100644 index 70830488..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/update.go +++ /dev/null @@ -1,104 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" -) - -// CmdUpdate updates resources of one or more containers. -// -// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
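The publish-then-initialize sequence at the end of trustedPush above is easy to misread: Publish is attempted first, and only the specific "repository not initialized" failure triggers root-key selection, Initialize, and a second Publish. A compile-only sketch of that control flow, using hypothetical local stand-ins (TrustRepo, ErrRepoNotInitialized) for the notary client types used above:

package trust

import (
    "errors"
    "sort"
)

// ErrRepoNotInitialized is a hypothetical stand-in for notary's
// client.ErrRepoNotInitialized value checked in the code above.
var ErrRepoNotInitialized = errors.New("trust repository not initialized")

// TrustRepo is a hypothetical stand-in for the notary repository client.
type TrustRepo interface {
    Publish() error
    Initialize(rootKeyID string) error
    ListRootKeys() []string
    CreateRootKey() (id string, err error)
}

// publishTrust mirrors the flow above: publish, and only on the
// "never initialized" error pick (or create) a root key, initialize
// the repository, and publish again.
func publishTrust(repo TrustRepo) error {
    err := repo.Publish()
    if !errors.Is(err, ErrRepoNotInitialized) {
        return err // nil on success, or a real publish failure
    }

    keys := repo.ListRootKeys()
    var rootKeyID string
    if len(keys) > 0 {
        // Sort so the choice of "first" root key is deterministic.
        sort.Strings(keys)
        rootKeyID = keys[0]
    } else {
        id, err := repo.CreateRootKey()
        if err != nil {
            return err
        }
        rootKeyID = id
    }

    if err := repo.Initialize(rootKeyID); err != nil {
        return err
    }
    return repo.Publish()
}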
-func (cli *DockerCli) CmdUpdate(args ...string) error { - cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) - flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") - flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") - flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") - flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") - flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") - flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") - - cmd.Require(flag.Min, 1) - cmd.ParseFlags(args, true) - if cmd.NFlag() == 0 { - return fmt.Errorf("You must provide one or more flags when using this command.") - } - - var err error - var flMemory int64 - if *flMemoryString != "" { - flMemory, err = units.RAMInBytes(*flMemoryString) - if err != nil { - return err - } - } - - var memoryReservation int64 - if *flMemoryReservation != "" { - memoryReservation, err = units.RAMInBytes(*flMemoryReservation) - if err != nil { - return err - } - } - - var memorySwap int64 - if *flMemorySwap != "" { - if *flMemorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(*flMemorySwap) - if err != nil { - return err - } - } - } - - var kernelMemory int64 - if *flKernelMemory != "" { - kernelMemory, err = units.RAMInBytes(*flKernelMemory) - if err != nil { - return err - } - } - - resources := container.Resources{ - BlkioWeight: *flBlkioWeight, - CpusetCpus: *flCpusetCpus, - CpusetMems: *flCpusetMems, - CPUShares: *flCPUShares, - Memory: flMemory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - KernelMemory: kernelMemory, - CPUPeriod: *flCPUPeriod, - CPUQuota: *flCPUQuota, - } - - updateConfig := container.UpdateConfig{ - Resources: resources, - } - - names := cmd.Args() - var errs []string - for _, name := range names { - if err := cli.client.ContainerUpdate(name, updateConfig); err != nil { - errs = append(errs, fmt.Sprintf("Failed to update container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go deleted file mode 100644 index 687add5b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go +++ /dev/null @@ -1,140 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "os" - gosignal "os/signal" - "runtime" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// 
encodeAuthToBase64 serializes the auth configuration as a base64-encoded JSON payload -func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -func (cli *DockerCli) encodeRegistryAuth(index *registrytypes.IndexInfo) (string, error) { - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, index) - return encodeAuthToBase64(authConfig) -} - -func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) - if err := cli.CmdLogin(registry.GetAuthConfigKey(index)); err != nil { - return "", err - } - return cli.encodeRegistryAuth(index) - } -} - -func (cli *DockerCli) resizeTty(id string, isExec bool) { - height, width := cli.getTtySize() - if height == 0 && width == 0 { - return - } - - options := types.ResizeOptions{ - ID: id, - Height: height, - Width: width, - } - - var err error - if isExec { - err = cli.client.ContainerExecResize(options) - } else { - err = cli.client.ContainerResize(options) - } - - if err != nil { - logrus.Debugf("Error resize: %s", err) - } -} - -// getExitCode performs an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { - c, err := cli.client.ContainerInspect(containerID) - if err != nil { - // If we can't connect, then the daemon probably died. - if err != client.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - - return c.State.Running, c.State.ExitCode, nil -} - -// getExecExitCode performs an inspect on the exec command. It returns -// the running state and the exit code. -func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { - resp, err := cli.client.ContainerExecInspect(execID) - if err != nil { - // If we can't connect, then the daemon probably died.
- if err != client.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - - return resp.Running, resp.ExitCode, nil -} - -func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { - cli.resizeTty(id, isExec) - - if runtime.GOOS == "windows" { - go func() { - prevH, prevW := cli.getTtySize() - for { - time.Sleep(time.Millisecond * 250) - h, w := cli.getTtySize() - - if prevW != w || prevH != h { - cli.resizeTty(id, isExec) - } - prevH = h - prevW = w - } - }() - } else { - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for range sigchan { - cli.resizeTty(id, isExec) - } - }() - } - return nil -} - -func (cli *DockerCli) getTtySize() (int, int) { - if !cli.isTerminalOut { - return 0, 0 - } - ws, err := term.GetWinsize(cli.outFd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go deleted file mode 100644 index ff10ddde..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package client - -import ( - "path/filepath" -) - -func getContextRoot(srcPath string) (string, error) { - return filepath.Join(srcPath, "."), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go deleted file mode 100644 index 09c33dad..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package client - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go deleted file mode 100644 index a64deef6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/version.go +++ /dev/null @@ -1,92 +0,0 @@ -package client - -import ( - "runtime" - "text/template" - "time" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/dockerversion" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" -) - -var versionTemplate = `Client: - Version: {{.Client.Version}} - API version: {{.Client.APIVersion}} - Go version: {{.Client.GoVersion}} - Git commit: {{.Client.GitCommit}} - Built: {{.Client.BuildTime}} - OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} - Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} - -Server: - Version: {{.Server.Version}} - API version: {{.Server.APIVersion}} - Go version: {{.Server.GoVersion}} - Git commit: {{.Server.GitCommit}} - Built: {{.Server.BuildTime}} - OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} - Experimental: {{.Server.Experimental}}{{end}}{{end}}` - -// CmdVersion shows Docker version information. 
-// -// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. -// -// Usage: docker version -func (cli *DockerCli) CmdVersion(args ...string) (err error) { - cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) - tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") - cmd.Require(flag.Exact, 0) - - cmd.ParseFlags(args, true) - - templateFormat := versionTemplate - if *tmplStr != "" { - templateFormat = *tmplStr - } - - var tmpl *template.Template - if tmpl, err = template.New("").Funcs(funcMap).Parse(templateFormat); err != nil { - return Cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - - vd := types.VersionResponse{ - Client: &types.Version{ - Version: dockerversion.Version, - APIVersion: cli.client.ClientVersion(), - GoVersion: runtime.Version(), - GitCommit: dockerversion.GitCommit, - BuildTime: dockerversion.BuildTime, - Os: runtime.GOOS, - Arch: runtime.GOARCH, - Experimental: utils.ExperimentalBuild(), - }, - } - - serverVersion, err := cli.client.ServerVersion() - if err == nil { - vd.Server = &serverVersion - } - - // first we need to make BuildTime more human friendly - t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) - if errTime == nil { - vd.Client.BuildTime = t.Format(time.ANSIC) - } - - if vd.ServerOK() { - t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) - if errTime == nil { - vd.Server.BuildTime = t.Format(time.ANSIC) - } - } - - if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { - err = err2 - } - cli.out.Write([]byte{'\n'}) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go deleted file mode 100644 index 284e30c3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/volume.go +++ /dev/null @@ -1,160 +0,0 @@ -package client - -import ( - "fmt" - "text/tabwriter" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -// CmdVolume is the parent subcommand for all volume commands -// -// Usage: docker volume -func (cli *DockerCli) CmdVolume(args ...string) error { - description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" - commands := [][]string{ - {"create", "Create a volume"}, - {"inspect", "Return low-level information on a volume"}, - {"ls", "List volumes"}, - {"rm", "Remove a volume"}, - } - - for _, cmd := range commands { - description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) - } - - description += "\nRun 'docker volume COMMAND --help' for more information on a command" - cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) - - cmd.Require(flag.Exact, 0) - err := cmd.ParseFlags(args, true) - cmd.Usage() - return err -} - -// CmdVolumeLs outputs a list of Docker volumes. 
-// -// Usage: docker volume ls [OPTIONS] -func (cli *DockerCli) CmdVolumeLs(args ...string) error { - cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) - - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") - - cmd.Require(flag.Exact, 0) - cmd.ParseFlags(args, true) - - volFilterArgs := filters.NewArgs() - for _, f := range flFilter.GetAll() { - var err error - volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) - if err != nil { - return err - } - } - - volumes, err := cli.client.VolumeList(volFilterArgs) - if err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - for _, warn := range volumes.Warnings { - fmt.Fprintln(cli.err, warn) - } - fmt.Fprintf(w, "DRIVER \tVOLUME NAME") - fmt.Fprintf(w, "\n") - } - - for _, vol := range volumes.Volumes { - if *quiet { - fmt.Fprintln(w, vol.Name) - continue - } - fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) - } - w.Flush() - return nil -} - -// CmdVolumeInspect displays low-level information on one or more volumes. -// -// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] -func (cli *DockerCli) CmdVolumeInspect(args ...string) error { - cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) - tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") - - cmd.Require(flag.Min, 1) - cmd.ParseFlags(args, true) - - if err := cmd.Parse(args); err != nil { - return nil - } - - inspectSearcher := func(name string) (interface{}, []byte, error) { - i, err := cli.client.VolumeInspect(name) - return i, nil, err - } - - return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) -} - -// CmdVolumeCreate creates a new volume. -// -// Usage: docker volume create [OPTIONS] -func (cli *DockerCli) CmdVolumeCreate(args ...string) error { - cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) - flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") - flName := cmd.String([]string{"-name"}, "", "Specify volume name") - - flDriverOpts := opts.NewMapOpts(nil, nil) - cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") - - cmd.Require(flag.Exact, 0) - cmd.ParseFlags(args, true) - - volReq := types.VolumeCreateRequest{ - Driver: *flDriver, - DriverOpts: flDriverOpts.GetAll(), - Name: *flName, - } - - vol, err := cli.client.VolumeCreate(volReq) - if err != nil { - return err - } - - fmt.Fprintf(cli.out, "%s\n", vol.Name) - return nil -} - -// CmdVolumeRm removes one or more volumes. -// -// Usage: docker volume rm VOLUME [VOLUME...] 
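CmdVolumeLs above relies on text/tabwriter to align the DRIVER and VOLUME NAME columns. A runnable sketch of just that formatting step, with hypothetical sample rows standing in for the VolumeList response:

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    // Same layout parameters as the listing above: minwidth 20,
    // tabwidth 1, padding 3, padded with spaces.
    w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
    fmt.Fprintln(w, "DRIVER\tVOLUME NAME")

    volumes := [][2]string{ // hypothetical sample data
        {"local", "mydata"},
        {"local", "pgdata"},
    }
    for _, v := range volumes {
        fmt.Fprintf(w, "%s\t%s\n", v[0], v[1])
    }
    w.Flush() // columns only line up once the writer is flushed
}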
-func (cli *DockerCli) CmdVolumeRm(args ...string) error { - cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) - cmd.Require(flag.Min, 1) - cmd.ParseFlags(args, true) - - var status = 0 - - for _, name := range cmd.Args() { - if err := cli.client.VolumeRemove(name); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(cli.out, "%s\n", name) - } - - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go deleted file mode 100644 index d77a523e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/wait.go +++ /dev/null @@ -1,35 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" -) - -// CmdWait blocks until a container stops, then prints its exit code. -// -// If more than one container is specified, this will wait synchronously on each container. -// -// Usage: docker wait CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdWait(args ...string) error { - cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - var errs []string - for _, name := range cmd.Args() { - status, err := cli.client.ContainerWait(name) - if err != nil { - errs = append(errs, fmt.Sprintf("Failed to wait container (%s): %s", name, err)) - } else { - fmt.Fprintf(cli.out, "%d\n", status) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile b/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile deleted file mode 100644 index 322f2544..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/fixtures/keyfile +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY - -MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 -AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky -NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== ------END EC PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go deleted file mode 100644 index 20188c12..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/form.go +++ /dev/null @@ -1,73 +0,0 @@ -package httputils - -import ( - "fmt" - "net/http" - "path/filepath" - "strconv" - "strings" -) - -// BoolValue transforms a form value in different formats into a boolean type. -func BoolValue(r *http.Request, k string) bool { - s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) - return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") -} - -// BoolValueOrDefault returns the default bool passed if the query param is -// missing, otherwise it's just a proxy to boolValue above -func BoolValueOrDefault(r *http.Request, k string, d bool) bool { - if _, ok := r.Form[k]; !ok { - return d - } - return BoolValue(r, k) -} - -// Int64ValueOrZero parses a form value into an int64 type. -// It returns 0 if the parsing fails. 
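The form.go helpers above encode a permissive parsing policy: BoolValue treats anything but an explicit negative as true, and the int64 helpers fall back to zero or a default on bad input. A runnable sketch of the same policy against a synthetic request:

package main

import (
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "strings"
)

// boolValue mirrors BoolValue above: only "", "0", "no", "false",
// and "none" count as false.
func boolValue(r *http.Request, k string) bool {
    s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
    return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}

// int64ValueOrZero mirrors Int64ValueOrZero above: parse failures
// (including a missing key) simply become 0.
func int64ValueOrZero(r *http.Request, k string) int64 {
    v, err := strconv.ParseInt(r.Form.Get(k), 10, 64)
    if err != nil {
        return 0
    }
    return v
}

func main() {
    // Synthetic form, e.g. from POST /containers/x/stop?t=10&force=yes
    r := &http.Request{Form: url.Values{"t": {"10"}, "force": {"yes"}}}
    fmt.Println(int64ValueOrZero(r, "t")) // 10
    fmt.Println(boolValue(r, "force"))    // true
    fmt.Println(boolValue(r, "missing"))  // false
}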
-func Int64ValueOrZero(r *http.Request, k string) int64 { - val, err := Int64ValueOrDefault(r, k, 0) - if err != nil { - return 0 - } - return val -} - -// Int64ValueOrDefault parses a form value into an int64 type. If there is an -// error, returns the error. If there is no value returns the default value. -func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { - if r.Form.Get(field) != "" { - value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) - if err != nil { - return value, err - } - return value, nil - } - return def, nil -} - -// ArchiveOptions stores archive information for different operations. -type ArchiveOptions struct { - Name string - Path string -} - -// ArchiveFormValues parses form values and turns them into ArchiveOptions. -// It fails if the archive name and path are not in the request. -func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { - if err := ParseForm(r); err != nil { - return ArchiveOptions{}, err - } - - name := vars["name"] - path := filepath.FromSlash(r.Form.Get("path")) - - switch { - case name == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") - case path == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") - } - - return ArchiveOptions{name, path}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go deleted file mode 100644 index ecf26e2a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/httputils/httputils.go +++ /dev/null @@ -1,178 +0,0 @@ -package httputils - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/api" - "github.com/docker/docker/pkg/version" -) - -// APIVersionKey is the client's requested API version. -const APIVersionKey = "api-version" - -// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. -// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). -type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -// HijackConnection interrupts the http response writer to get the -// underlying connection and operate with it. -func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -// CloseStreams ensures that a list of http streams is properly closed. -func CloseStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// CheckForJSON makes sure that the request's Content-Type is application/json.
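HijackConnection above is the primitive behind attach-style endpoints: the handler takes over the raw TCP connection, after which the server no longer writes the response for you. A runnable sketch of the pattern with net/http alone (the route and address are illustrative):

package main

import (
    "fmt"
    "net/http"
)

func rawHandler(w http.ResponseWriter, r *http.Request) {
    hj, ok := w.(http.Hijacker)
    if !ok {
        http.Error(w, "hijacking not supported", http.StatusInternalServerError)
        return
    }
    conn, bufrw, err := hj.Hijack()
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer conn.Close()

    // Once hijacked, the status line and headers are our problem.
    fmt.Fprint(bufrw, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
    fmt.Fprint(bufrw, "raw bytes follow\n")
    bufrw.Flush()
}

func main() {
    http.HandleFunc("/attach", rawHandler)
    http.ListenAndServe("127.0.0.1:8080", nil) // demo address; blocks
}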
-func CheckForJSON(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { - return nil - } - return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) -} - -// ParseForm ensures the request form is parsed even with invalid content types. -// If we don't do this, POST method without Content-type (even with empty body) will fail. -func ParseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// ParseMultipartForm ensures the request form is parsed, even with invalid content types. -func ParseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// WriteError decodes a specific docker error and sends it in the response. -func WriteError(w http.ResponseWriter, err error) { - if err == nil || w == nil { - logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") - return - } - - statusCode := http.StatusInternalServerError - errMsg := err.Error() - - // Based on the type of error we get we need to process things - // slightly differently to extract the error message. - // In the 'errcode.*' cases there are two different types of - // error that could be returned. errcode.ErrorCode is the base - // type of error object - it is just an 'int' that can then be - // used as the look-up key to find the message. errcode.Error - // extends errcode.ErrorCode by adding error-instance specific - // data, like 'details' or variable strings to be inserted into - // the message. - // - // Ideally, we should just be able to call err.Error() for all - // cases but the errcode package doesn't support that yet. - // - // Additionally, in both errcode cases, there might be an http - // status code associated with it, and if so use it. - switch err.(type) { - case errcode.ErrorCode: - daError, _ := err.(errcode.ErrorCode) - statusCode = daError.Descriptor().HTTPStatusCode - errMsg = daError.Message() - - case errcode.Error: - // For reference, if you're looking for a particular error - // then you can do something like : - // import ( derr "github.com/docker/docker/errors" ) - // if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... } - - daError, _ := err.(errcode.Error) - statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode - errMsg = daError.Message - - default: - // This part will be removed once we've - // converted everything over to use the errcode package - - // FIXME: this is brittle and should not be necessary.
- // If we need to differentiate between different possible error types, - // we should create appropriate error types with clearly defined meaning - errStr := strings.ToLower(err.Error()) - for keyword, status := range map[string]int{ - "not found": http.StatusNotFound, - "no such": http.StatusNotFound, - "bad parameter": http.StatusBadRequest, - "conflict": http.StatusConflict, - "impossible": http.StatusNotAcceptable, - "wrong login/password": http.StatusUnauthorized, - "hasn't been activated": http.StatusForbidden, - } { - if strings.Contains(errStr, keyword) { - statusCode = status - break - } - } - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - http.Error(w, errMsg, statusCode) -} - -// WriteJSON writes the value v to the http response stream as json with standard json encoding. -func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return json.NewEncoder(w).Encode(v) -} - -// VersionFromContext returns an API version from the context using APIVersionKey. -// It panics if the context value does not have version.Version type. -func VersionFromContext(ctx context.Context) (ver version.Version) { - if ctx == nil { - return - } - val := ctx.Value(APIVersionKey) - if val == nil { - return - } - return val.(version.Version) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go deleted file mode 100644 index c978478e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/middleware.go +++ /dev/null @@ -1,195 +0,0 @@ -package server - -import ( - "bufio" - "encoding/json" - "io" - "net/http" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/version" - "golang.org/x/net/context" -) - -// middleware is an adapter to allow the use of ordinary functions as Docker API filters. -// Any function that has the appropriate signature can be registered as a middleware.
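WriteError above maps error text to HTTP status codes by substring, and WriteJSON shows the header/status/body ordering that net/http requires. A runnable sketch of both ideas; as the FIXME above notes, the keyword heuristic is brittle and typed errors are the better long-term design:

package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
)

// statusFromError reproduces the substring heuristic above with a
// shortened keyword table.
func statusFromError(err error) int {
    errStr := strings.ToLower(err.Error())
    for keyword, status := range map[string]int{
        "not found":     http.StatusNotFound,
        "no such":       http.StatusNotFound,
        "bad parameter": http.StatusBadRequest,
        "conflict":      http.StatusConflict,
    } {
        if strings.Contains(errStr, keyword) {
            return status
        }
    }
    return http.StatusInternalServerError
}

// writeJSON mirrors WriteJSON above: set headers before WriteHeader,
// because WriteHeader freezes the header map.
func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(code)
    return json.NewEncoder(w).Encode(v)
}

func main() {
    fmt.Println(statusFromError(errors.New("no such container: abc123"))) // 404
    _ = writeJSON // called from handlers in a real server
}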
-type middleware func(handler httputils.APIFunc) httputils.APIFunc - -// debugRequestMiddleware dumps the request to logger -func debugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - logrus.Debugf("%s %s", r.Method, r.RequestURI) - - if r.Method != "POST" { - return handler(ctx, w, r, vars) - } - if err := httputils.CheckForJSON(r); err != nil { - return handler(ctx, w, r, vars) - } - maxBodySize := 4096 // 4KB - if r.ContentLength > int64(maxBodySize) { - return handler(ctx, w, r, vars) - } - - body := r.Body - bufReader := bufio.NewReaderSize(body, maxBodySize) - r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) - - b, err := bufReader.Peek(maxBodySize) - if err != io.EOF { - // either there was an error reading, or the buffer is full (in which case the request is too large) - return handler(ctx, w, r, vars) - } - - var postForm map[string]interface{} - if err := json.Unmarshal(b, &postForm); err == nil { - if _, exists := postForm["password"]; exists { - postForm["password"] = "*****" - } - formStr, errMarshal := json.Marshal(postForm) - if errMarshal == nil { - logrus.Debugf("form data: %s", string(formStr)) - } else { - logrus.Debugf("form data: %q", postForm) - } - } - - return handler(ctx, w, r, vars) - } -} - -// authorizationMiddleware perform authorization on the request. -func (s *Server) authorizationMiddleware(handler httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // FIXME: fill when authN gets in - // User and UserAuthNMethod are taken from AuthN plugins - // Currently tracked in https://github.com/docker/docker/pull/13994 - user := "" - userAuthNMethod := "" - authCtx := authorization.NewCtx(s.authZPlugins, user, userAuthNMethod, r.Method, r.RequestURI) - - if err := authCtx.AuthZRequest(w, r); err != nil { - logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - rw := authorization.NewResponseModifier(w) - - if err := handler(ctx, rw, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - if err := authCtx.AuthZResponse(rw, r); err != nil { - logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - return nil - } -} - -// userAgentMiddleware checks the User-Agent header looking for a valid docker client spec. -func (s *Server) userAgentMiddleware(handler httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - dockerVersion := version.Version(s.cfg.Version) - - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - - // v1.20 onwards includes the GOOS of the client after the version - // such as Docker/1.7.0 (linux) - if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { - userAgent[1] = strings.Split(userAgent[1], " ")[0] - } - - if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { - logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) - } - } - return handler(ctx, w, r, vars) - } -} - -// corsMiddleware sets the CORS header expectations in the server. 
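debugRequestMiddleware above peeks at small POST bodies and masks the password field before logging. The masking step in isolation, as a runnable sketch:

package main

import (
    "encoding/json"
    "fmt"
)

// maskSecrets decodes a JSON body into a generic map, overwrites the
// password field if present, and re-encodes the result for logging.
func maskSecrets(body []byte) []byte {
    var form map[string]interface{}
    if err := json.Unmarshal(body, &form); err != nil {
        return nil // not JSON; log nothing rather than risk leaking secrets
    }
    if _, ok := form["password"]; ok {
        form["password"] = "*****"
    }
    masked, err := json.Marshal(form)
    if err != nil {
        return nil
    }
    return masked
}

func main() {
    in := []byte(`{"username":"alice","password":"hunter2"}`)
    fmt.Printf("form data: %s\n", maskSecrets(in))
    // prints: form data: {"password":"*****","username":"alice"}
}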
-func (s *Server) corsMiddleware(handler httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" - // otherwise, all head values will be passed to HTTP handler - corsHeaders := s.cfg.CorsHeaders - if corsHeaders == "" && s.cfg.EnableCors { - corsHeaders = "*" - } - - if corsHeaders != "" { - writeCorsHeaders(w, r, corsHeaders) - } - return handler(ctx, w, r, vars) - } -} - -// versionMiddleware checks the api version requirements before passing the request to the server handler. -func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - apiVersion := version.Version(vars["version"]) - if apiVersion == "" { - apiVersion = api.DefaultVersion - } - - if apiVersion.GreaterThan(api.DefaultVersion) { - return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.DefaultVersion) - } - if apiVersion.LessThan(api.MinVersion) { - return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.DefaultVersion) - } - - w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")") - ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) - return handler(ctx, w, r, vars) - } -} - -// handleWithGlobalMiddlwares wraps the handler function for a request with -// the server's global middlewares. The order of the middlewares is backwards, -// meaning that the first in the list will be evaluated last. -// -// Example: handleWithGlobalMiddlewares(s.getContainersName) -// -// s.loggingMiddleware( -// s.userAgentMiddleware( -// s.corsMiddleware( -// versionMiddleware(s.getContainersName) -// ) -// ) -// ) -// ) -func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { - middlewares := []middleware{ - versionMiddleware, - s.corsMiddleware, - s.userAgentMiddleware, - } - - // Only want this on debug level - if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { - middlewares = append(middlewares, debugRequestMiddleware) - } - - if len(s.cfg.AuthorizationPluginNames) > 0 { - s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames) - middlewares = append(middlewares, s.authorizationMiddleware) - } - - h := handler - for _, m := range middlewares { - h = m(h) - } - return h -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go deleted file mode 100644 index 766462bd..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/profiler.go +++ /dev/null @@ -1,38 +0,0 @@ -package server - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - - "github.com/gorilla/mux" -) - -func profilerSetup(mainRouter *mux.Router, path string) { - var r = mainRouter.PathPrefix(path).Subrouter() - r.HandleFunc("/vars", expVars) - r.HandleFunc("/pprof/", pprof.Index) - r.HandleFunc("/pprof/cmdline", pprof.Cmdline) - r.HandleFunc("/pprof/profile", pprof.Profile) - r.HandleFunc("/pprof/symbol", pprof.Symbol) - r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) - r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) - r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -// Replicated from expvar.go as not 
public. -func expVars(w http.ResponseWriter, r *http.Request) { - first := true - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go deleted file mode 100644 index fd9e314a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/backend.go +++ /dev/null @@ -1,12 +0,0 @@ -package build - -// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. -type Backend interface { - // Build builds a Docker image referenced by an imageID string. - // - // Note: Tagging an image should not be done by a Builder, it should instead be done - // by the caller. - // - // TODO: make this return a reference instead of string - Build() (imageID string) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go deleted file mode 100644 index e21b6346..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build.go +++ /dev/null @@ -1,33 +0,0 @@ -package build - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/local" - "github.com/docker/docker/daemon" -) - -// buildRouter is a router to talk with the build controller -type buildRouter struct { - backend *daemon.Daemon - routes []router.Route -} - -// NewRouter initializes a new build router -func NewRouter(b *daemon.Daemon) router.Router { - r := &buildRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the build controller -func (r *buildRouter) Routes() []router.Route { - return r.routes -} - -func (r *buildRouter) initRoutes() { - r.routes = []router.Route{ - local.NewPostRoute("/build", r.postBuild), - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go deleted file mode 100644 index 5cda79c3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/build/build_routes.go +++ /dev/null @@ -1,260 +0,0 @@ -package build - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/daemon/daemonbuilder" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/reference" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -// sanitizeRepoAndTags parses the raw "t" parameter received from the client -// to a slice of repoAndTag. -// It also validates each repoName and tag. 
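handleWithGlobalMiddlewares above wraps the handler by iterating forward over the slice, which is why its comment calls the order "backwards": the first middleware in the list ends up innermost and therefore runs last. A runnable sketch that makes the evaluation order visible:

package main

import (
    "fmt"
    "net/http"
)

type middleware func(http.HandlerFunc) http.HandlerFunc

// tag returns a middleware that prints its name before delegating.
func tag(name string) middleware {
    return func(next http.HandlerFunc) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            fmt.Println("enter", name)
            next(w, r)
        }
    }
}

// chain wraps h the same way as above: each iteration puts the next
// middleware OUTSIDE the previous result.
func chain(h http.HandlerFunc, mws ...middleware) http.HandlerFunc {
    for _, m := range mws {
        h = m(h)
    }
    return h
}

func main() {
    h := chain(
        func(w http.ResponseWriter, r *http.Request) { fmt.Println("handler") },
        tag("version"), tag("cors"), tag("userAgent"),
    )
    h(nil, nil)
    // prints: enter userAgent, enter cors, enter version, handler
}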
-func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { - var ( - repoAndTags []reference.Named - // This map is used for deduplicating the "-t" parameter. - uniqNames = make(map[string]struct{}) - ) - for _, repo := range names { - if repo == "" { - continue - } - - ref, err := reference.ParseNamed(repo) - if err != nil { - return nil, err - } - - ref = reference.WithDefaultTag(ref) - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("build tag cannot contain a digest") - } - - if _, isTagged := ref.(reference.NamedTagged); !isTagged { - ref, err = reference.WithTag(ref, reference.DefaultTag) - } - - nameWithTag := ref.String() - - if _, exists := uniqNames[nameWithTag]; !exists { - uniqNames[nameWithTag] = struct{}{} - repoAndTags = append(repoAndTags, ref) - } - } - return repoAndTags, nil -} - -func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { - version := httputils.VersionFromContext(ctx) - options := &types.ImageBuildOptions{} - if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { - options.Remove = true - } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { - options.Remove = true - } else { - options.Remove = httputils.BoolValue(r, "rm") - } - if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { - options.PullParent = true - } - - options.Dockerfile = r.FormValue("dockerfile") - options.SuppressOutput = httputils.BoolValue(r, "q") - options.NoCache = httputils.BoolValue(r, "nocache") - options.ForceRemove = httputils.BoolValue(r, "forcerm") - options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") - options.Memory = httputils.Int64ValueOrZero(r, "memory") - options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") - options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") - options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") - options.CPUSetCPUs = r.FormValue("cpusetcpus") - options.CPUSetMems = r.FormValue("cpusetmems") - options.CgroupParent = r.FormValue("cgroupparent") - - if r.Form.Get("shmsize") != "" { - shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) - if err != nil { - return nil, err - } - options.ShmSize = shmSize - } - - if i := container.IsolationLevel(r.FormValue("isolation")); i != "" { - if !container.IsolationLevel.IsValid(i) { - return nil, fmt.Errorf("Unsupported isolation: %q", i) - } - options.IsolationLevel = i - } - - var buildUlimits = []*units.Ulimit{} - ulimitsJSON := r.FormValue("ulimits") - if ulimitsJSON != "" { - if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { - return nil, err - } - options.Ulimits = buildUlimits - } - - var buildArgs = map[string]string{} - buildArgsJSON := r.FormValue("buildargs") - if buildArgsJSON != "" { - if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { - return nil, err - } - options.BuildArgs = buildArgs - } - return options, nil -} - -func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfigs = map[string]types.AuthConfig{} - authConfigsEncoded = r.Header.Get("X-Registry-Config") - notVerboseBuffer = bytes.NewBuffer(nil) - ) - - if authConfigsEncoded != "" { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { - 
// for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting - // to be empty. - } - } - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - sf := streamformatter.NewJSONStreamFormatter() - errf := func(err error) error { - if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { - output.Write(notVerboseBuffer.Bytes()) - } - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an internal error. - if !output.Flushed() { - return err - } - _, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) - if err != nil { - logrus.Warnf("could not write error response: %v", err) - } - return nil - } - - buildOptions, err := newImageBuildOptions(ctx, r) - if err != nil { - return errf(err) - } - - repoAndTags, err := sanitizeRepoAndTags(r.Form["t"]) - if err != nil { - return errf(err) - } - - remoteURL := r.FormValue("remote") - - // Currently, only used if context is from a remote url. - // Look at code in DetectContextFromRemoteURL for more information. - createProgressReader := func(in io.ReadCloser) io.ReadCloser { - progressOutput := sf.NewProgressOutput(output, true) - if buildOptions.SuppressOutput { - progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) - } - return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) - } - - var ( - context builder.ModifiableContext - dockerfileName string - ) - context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) - if err != nil { - return errf(err) - } - defer func() { - if err := context.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) - } - }() - if len(dockerfileName) > 0 { - buildOptions.Dockerfile = dockerfileName - } - - b, err := dockerfile.NewBuilder( - buildOptions, // result of newBuildConfig - &daemonbuilder.Docker{br.backend}, - builder.DockerIgnoreContext{ModifiableContext: context}, - nil) - if err != nil { - return errf(err) - } - if buildOptions.SuppressOutput { - b.Output = notVerboseBuffer - } else { - b.Output = output - } - b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} - b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf} - if buildOptions.SuppressOutput { - b.Stdout = &streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} - b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} - } - - if closeNotifier, ok := w.(http.CloseNotifier); ok { - finished := make(chan struct{}) - defer close(finished) - clientGone := closeNotifier.CloseNotify() - go func() { - select { - case <-finished: - case <-clientGone: - logrus.Infof("Client disconnected, cancelling job: build") - b.Cancel() - } - }() - } - - imgID, err := b.Build() - if err != nil { - return errf(err) - } - - for _, rt := range repoAndTags { - if err := br.backend.TagImage(rt, imgID); err != nil { - return errf(err) - } - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. 
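postBuild above cancels the build when the HTTP client disconnects, by racing a "finished" channel against CloseNotify in a goroutine. A runnable sketch of that pattern (CloseNotifier matches the era of this code; newer code would watch r.Context().Done() instead):

package main

import (
    "fmt"
    "net/http"
    "time"
)

func cancellable(w http.ResponseWriter, r *http.Request) {
    cancel := make(chan struct{})
    if cn, ok := w.(http.CloseNotifier); ok {
        finished := make(chan struct{})
        defer close(finished)
        clientGone := cn.CloseNotify()
        go func() {
            select {
            case <-finished: // handler returned normally
            case <-clientGone: // client hung up mid-build
                fmt.Println("client disconnected, cancelling job")
                close(cancel)
            }
        }()
    }

    // Stand-in for the build loop: do work until done or cancelled.
    for i := 0; i < 10; i++ {
        select {
        case <-cancel:
            return
        case <-time.After(200 * time.Millisecond):
            fmt.Fprintf(w, "step %d\n", i)
        }
    }
}

func main() {
    http.HandleFunc("/build", cancellable)
    http.ListenAndServe("127.0.0.1:8080", nil) // demo address; blocks
}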
- if buildOptions.SuppressOutput { - stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} - fmt.Fprintf(stdout, "%s\n", string(imgID)) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go deleted file mode 100644 index 80bed644..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/backend.go +++ /dev/null @@ -1,74 +0,0 @@ -package container - -import ( - "io" - "time" - - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/version" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -// execBackend includes functions to implement to provide exec functionality. -type execBackend interface { - ContainerExecCreate(config *types.ExecConfig) (string, error) - ContainerExecInspect(id string) (*exec.Config, error) - ContainerExecResize(name string, height, width int) error - ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error - ExecExists(name string) (bool, error) -} - -// copyBackend includes functions to implement to provide container copy functionality. -type copyBackend interface { - ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) - ContainerCopy(name string, res string) (io.ReadCloser, error) - ContainerExport(name string, out io.Writer) error - ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error - ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) -} - -// stateBackend includes functions to implement to provide container state lifecycle functionality. -type stateBackend interface { - ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) - ContainerKill(name string, sig uint64) error - ContainerPause(name string) error - ContainerRename(oldName, newName string) error - ContainerResize(name string, height, width int) error - ContainerRestart(name string, seconds int) error - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerStart(name string, hostConfig *container.HostConfig) error - ContainerStop(name string, seconds int) error - ContainerUnpause(name string) error - ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) - ContainerWait(name string, timeout time.Duration) (int, error) - Exists(id string) bool -} - -// monitorBackend includes functions to implement to provide containers monitoring functionality. -type monitorBackend interface { - ContainerChanges(name string) ([]archive.Change, error) - ContainerInspect(name string, size bool, version version.Version) (interface{}, error) - ContainerLogs(name string, config *daemon.ContainerLogsConfig) error - ContainerStats(name string, config *daemon.ContainerStatsConfig) error - ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) - - Containers(config *daemon.ContainersConfig) ([]*types.Container, error) -} - -// attachBackend includes function to implement to provide container attaching functionality. 
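backend.go above splits the daemon's surface into narrow, capability-scoped interfaces and then unions them, so the router depends on one type while tests stub only the slice they exercise. A compile-only sketch of that embedding pattern with hypothetical method sets:

// Package backend sketches interface composition as used above.
package backend

import "io"

// stateBackend and copyBackend are deliberately narrow, mirroring the
// capability-scoped interfaces above (method sets are illustrative).
type stateBackend interface {
    ContainerStart(name string) error
    ContainerStop(name string) error
}

type copyBackend interface {
    ContainerExport(name string, out io.Writer) error
}

// Backend is the union: any daemon implementing every embedded
// interface satisfies Backend implicitly.
type Backend interface {
    stateBackend
    copyBackend
}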
-type attachBackend interface { - ContainerAttachWithLogs(name string, c *daemon.ContainerAttachWithLogsConfig) error - ContainerWsAttachWithLogs(name string, c *daemon.ContainerWsAttachWithLogsConfig) error -} - -// Backend is all the methods that need to be implemented to provide container specific functionality. -type Backend interface { - execBackend - copyBackend - stateBackend - monitorBackend - attachBackend -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go deleted file mode 100644 index dd931650..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container.go +++ /dev/null @@ -1,66 +0,0 @@ -package container - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/local" -) - -// containerRouter is a router to talk with the container controller -type containerRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new container router -func NewRouter(b Backend) router.Router { - r := &containerRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the container controller -func (r *containerRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in container router -func (r *containerRouter) initRoutes() { - r.routes = []router.Route{ - // HEAD - local.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), - // GET - local.NewGetRoute("/containers/json", r.getContainersJSON), - local.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), - local.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), - local.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), - local.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), - local.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), - local.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), - local.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), - local.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), - local.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), - // POST - local.NewPostRoute("/containers/create", r.postContainersCreate), - local.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), - local.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), - local.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), - local.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), - local.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), - local.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), - local.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), - local.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), - local.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), - local.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), - local.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), - local.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), - local.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), - local.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), - local.NewPostRoute("/containers/{name:.*}/update", 
r.postContainerUpdate), - // PUT - local.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), - // DELETE - local.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go deleted file mode 100644 index 4e2ffca2..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/container_routes.go +++ /dev/null @@ -1,503 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/daemon" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" - "golang.org/x/net/websocket" -) - -func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - config := &daemon.ContainersConfig{ - All: httputils.BoolValue(r, "all"), - Size: httputils.BoolValue(r, "size"), - Since: r.Form.Get("since"), - Before: r.Form.Get("before"), - Filters: r.Form.Get("filters"), - } - - if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - limit, err := strconv.Atoi(tmpLimit) - if err != nil { - return err - } - config.Limit = limit - } - - containers, err := s.backend.Containers(config) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, containers) -} - -func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - stream := httputils.BoolValueOrDefault(r, "stream", true) - var out io.Writer - if !stream { - w.Header().Set("Content-Type", "application/json") - out = w - } else { - wf := ioutils.NewWriteFlusher(w) - out = wf - defer wf.Close() - } - - var closeNotifier <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - closeNotifier = notifier.CloseNotify() - } - - config := &daemon.ContainerStatsConfig{ - Stream: stream, - OutStream: out, - Stop: closeNotifier, - Version: httputils.VersionFromContext(ctx), - } - - return s.backend.ContainerStats(vars["name"], config) -} - -func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // Args are validated before the stream starts because when it starts we're - // sending HTTP 200 by writing an empty chunk of data to tell the client that - // daemon is going to stream. By sending this initial HTTP 200 we can't report - // any error after the stream starts (i.e. container not found, wrong parameters) - // with the appropriate status code. 
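The comment above pins down a real constraint: once the empty chunk goes out and the 200 is committed, errors can only be reported in-band on the stream. A runnable sketch of validate-then-commit-then-stream with plain net/http (route, params, and data are illustrative):

package main

import (
    "fmt"
    "net/http"
    "time"
)

func streamLogs(w http.ResponseWriter, r *http.Request) {
    stdout := r.FormValue("stdout") == "1"
    stderr := r.FormValue("stderr") == "1"
    if !stdout && !stderr {
        // Still early enough to use a proper error status.
        http.Error(w, "Bad parameters: you must choose at least one stream", http.StatusBadRequest)
        return
    }

    // Commit the response now so the client knows streaming will follow,
    // even if no data has been produced yet.
    w.WriteHeader(http.StatusOK)
    if f, ok := w.(http.Flusher); ok {
        f.Flush()
    }

    // From here on, failures can only be written onto the stream itself.
    for i := 0; i < 3; i++ {
        fmt.Fprintf(w, "log line %d\n", i)
        if f, ok := w.(http.Flusher); ok {
            f.Flush()
        }
        time.Sleep(100 * time.Millisecond)
    }
}

func main() {
    http.HandleFunc("/logs", streamLogs)
    http.ListenAndServe("127.0.0.1:8080", nil) // demo address; blocks
}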
- stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - - var since time.Time - if r.Form.Get("since") != "" { - s, n, err := timetypes.ParseTimestamps(r.Form.Get("since"), 0) - if err != nil { - return err - } - since = time.Unix(s, n) - } - - var closeNotifier <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - closeNotifier = notifier.CloseNotify() - } - - containerName := vars["name"] - - if !s.backend.Exists(containerName) { - return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) - } - - // write an empty chunk of data (this is to ensure that the - // HTTP Response is sent immediately, even if the container has - // not yet produced any data) - w.WriteHeader(http.StatusOK) - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - logsConfig := &daemon.ContainerLogsConfig{ - Follow: httputils.BoolValue(r, "follow"), - Timestamps: httputils.BoolValue(r, "timestamps"), - Since: since, - Tail: r.Form.Get("tail"), - UseStdout: stdout, - UseStderr: stderr, - OutStream: output, - Stop: closeNotifier, - } - - if err := s.backend.ContainerLogs(containerName, logsConfig); err != nil { - // The client may be expecting all of the data we're sending to - // be multiplexed, so send it through OutStream, which will - // have been set up to handle that if needed. - fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %s\n", utils.GetErrorMessage(err)) - } - - return nil -} - -func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.backend.ContainerExport(vars["name"], w) -} - -func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If contentLength is -1, we can assume chunked encoding - // or more technically that the length is unknown - // https://golang.org/src/pkg/net/http/request.go#L139 - // net/http otherwise seems to swallow any headers related to chunked encoding - // including r.TransferEncoding - // allow a nil body for backwards compatibility - var hostConfig *container.HostConfig - if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - c, err := runconfig.DecodeHostConfig(r.Body) - if err != nil { - return err - } - - hostConfig = c - } - - if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - seconds, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var sig syscall.Signal - name := vars["name"] - - // If we have a signal, look at it.
Otherwise, do nothing - if sigStr := r.Form.Get("signal"); sigStr != "" { - var err error - if sig, err = signal.ParseSignal(sigStr); err != nil { - return err - } - } - - if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { - theErr, isDerr := err.(errcode.ErrorCoder) - isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning - - // Return any error that is not caused by the container being stopped. - // For API versions >= 1.20, also return the not-running error; older - // versions swallow it for backwards compatibility. - version := httputils.VersionFromContext(ctx) - if version.GreaterThanOrEqualTo("1.20") || !isStopped { - return fmt.Errorf("Cannot kill container %s: %v", name, utils.GetErrorMessage(err)) - } - } - - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - timeout, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerPause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerUnpause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ - StatusCode: status, - }) -} - -func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - changes, err := s.backend.ContainerChanges(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, changes) -} - -func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, procList) -} - -func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - newName := r.Form.Get("name") - if err := s.backend.ContainerRename(name, newName); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err -
} - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - var updateConfig container.UpdateConfig - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&updateConfig); err != nil { - return err - } - - hostConfig := &container.HostConfig{ - Resources: updateConfig.Resources, - } - - name := vars["name"] - warnings, err := s.backend.ContainerUpdate(name, hostConfig) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ - Warnings: warnings, - }) -} - -func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - name := r.Form.Get("name") - - config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) - if err != nil { - return err - } - version := httputils.VersionFromContext(ctx) - adjustCPUShares := version.LessThan("1.19") - - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ - Name: name, - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - AdjustCPUShares: adjustCPUShares, - }) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, ccr) -} - -func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.ContainerRmConfig{ - ForceRemove: httputils.BoolValue(r, "force"), - RemoveVolume: httputils.BoolValue(r, "v"), - RemoveLink: httputils.BoolValue(r, "link"), - } - - if err := s.backend.ContainerRm(name, config); err != nil { - // Force a 404 for the empty string - if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { - return fmt.Errorf("no such container: \"\"") - } - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerResize(vars["name"], height, width) -} - -func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - err := httputils.ParseForm(r) - if err != nil { - return err - } - containerName := vars["name"] - - _, upgrade := r.Header["Upgrade"] - - keys := []byte{} - detachKeys := r.FormValue("detachKeys") - if detachKeys != "" { - keys, err = term.ToBytes(detachKeys) - if err != nil { - logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) - } - } - - attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ - Hijacker: w.(http.Hijacker), - Upgrade: upgrade, - UseStdin: httputils.BoolValue(r, "stdin"), - UseStdout: httputils.BoolValue(r, "stdout"), - UseStderr: httputils.BoolValue(r, "stderr"), - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: keys, - } - - return s.backend.ContainerAttachWithLogs(containerName, attachWithLogsConfig) -} - -func (s *containerRouter) 
wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - containerName := vars["name"] - - if !s.backend.Exists(containerName) { - return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) - } - - var keys []byte - var err error - detachKeys := r.FormValue("detachKeys") - if detachKeys != "" { - keys, err = term.ToBytes(detachKeys) - if err != nil { - logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) - } - } - - h := websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - - wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ - InStream: ws, - OutStream: ws, - ErrStream: ws, - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: keys, - } - - if err := s.backend.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil { - logrus.Errorf("Error attaching websocket: %s", utils.GetErrorMessage(err)) - } - }) - ws := websocket.Server{Handler: h, Handshake: nil} - ws.ServeHTTP(w, r) - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go deleted file mode 100644 index 69584b31..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/copy.go +++ /dev/null @@ -1,112 +0,0 @@ -package container - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// postContainersCopy is deprecated in favor of getContainersArchive. -func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cfg := types.CopyConfig{} - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - return err - } - - if cfg.Resource == "" { - return fmt.Errorf("Path cannot be empty") - } - - data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "no such container") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if os.IsNotExist(err) { - return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) - } - return err - } - defer data.Close() - - w.Header().Set("Content-Type", "application/x-tar") - if _, err := io.Copy(w, data); err != nil { - return err - } - - return nil -} - -// Encode the stat to JSON, base64 encode, and place in a header.
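That header scheme, JSON inside base64 inside a plain HTTP header, round-trips without escaping issues. A small self-contained sketch of both directions (the stat struct here is a trimmed-down stand-in for types.ContainerPathStat):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// pathStat is a simplified stand-in for types.ContainerPathStat.
type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	// Server side: marshal the stat and base64-encode it into the header.
	stat := pathStat{Name: "etc", Size: 4096}
	raw, err := json.Marshal(stat)
	if err != nil {
		panic(err)
	}
	h := http.Header{}
	h.Set("X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(raw))

	// Client side: reverse the two encodings to recover the struct.
	decoded, err := base64.StdEncoding.DecodeString(h.Get("X-Docker-Container-Path-Stat"))
	if err != nil {
		panic(err)
	}
	var got pathStat
	if err := json.Unmarshal(decoded, &got); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", got)
}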
-func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { - statJSON, err := json.Marshal(stat) - if err != nil { - return err - } - - header.Set( - "X-Docker-Container-Path-Stat", - base64.StdEncoding.EncodeToString(statJSON), - ) - - return nil -} - -func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - stat, err := s.backend.ContainerStatPath(v.Name, v.Path) - if err != nil { - return err - } - - return setContainerPathStatHeader(stat, w.Header()) -} - -func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) - if err != nil { - return err - } - defer tarArchive.Close() - - if err := setContainerPathStatHeader(stat, w.Header()); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - _, err = io.Copy(w, tarArchive) - - return err -} - -func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") - return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go deleted file mode 100644 index caa5da06..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/exec.go +++ /dev/null @@ -1,135 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - eConfig, err := s.backend.ContainerExecInspect(vars["id"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, eConfig) -} - -func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - name := vars["name"] - - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { - return err - } - execConfig.Container = name - - if len(execConfig.Cmd) == 0 { - return fmt.Errorf("No exec command specified") - } - - // Register an instance of Exec in container. 
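On the wire, creating an exec instance is a single JSON POST against the exec route registered earlier, and the handler above rejects an empty Cmd. A hypothetical client-side sketch (the host address, API version path, container name, and the "Id" response field are illustrative assumptions, not taken from this diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field names mirror types.ExecConfig; a map stands in for the struct.
	body, err := json.Marshal(map[string]interface{}{
		"AttachStdout": true,
		"AttachStderr": true,
		"Cmd":          []string{"ls", "-l", "/"}, // must be non-empty
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://127.0.0.1:2375/v1.22/containers/web/exec",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the daemon answers 201 Created with the new exec ID.
	var created struct {
		ID string `json:"Id"` // assumed JSON tag
	}
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		panic(err)
	}
	fmt.Println("exec id:", created.ID)
}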
- id, err := s.backend.ContainerExecCreate(execConfig) - if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %s", name, utils.GetErrorMessage(err)) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: id, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. -func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - if version.GreaterThan("1.21") { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - } - - var ( - execName = vars["name"] - stdin, inStream io.ReadCloser - stdout, stderr, outStream io.Writer - ) - - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { - return err - } - - if exists, err := s.backend.ExecExists(execName); !exists { - return err - } - - if !execStartCheck.Detach { - var err error - // Setting up the streaming http interface. - inStream, outStream, err = httputils.HijackConnection(w) - if err != nil { - return err - } - defer httputils.CloseStreams(inStream, outStream) - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - stdin = inStream - stdout = outStream - if !execStartCheck.Tty { - stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - } else { - outStream = w - } - - // Now run the user process in container. - if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { - if execStartCheck.Detach { - return err - } - logrus.Errorf("Error running exec in container: %v\n", utils.GetErrorMessage(err)) - } - return nil -} - -func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerExecResize(vars["name"], height, width) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go deleted file mode 100644 index e3bb09a3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/container/inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -// getContainersByName inspects containers configuration and serializes it as json. 
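The stdcopy writers above interleave stdout and stderr onto a single connection by framing each chunk with a small header; the reading side undoes this with stdcopy.StdCopy from the same vendored package. A round-trip sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Producer side: both streams multiplexed into one buffer, as the
	// exec-start handler does when no TTY is allocated.
	var mux bytes.Buffer
	stdcopy.NewStdWriter(&mux, stdcopy.Stdout).Write([]byte("normal output\n"))
	stdcopy.NewStdWriter(&mux, stdcopy.Stderr).Write([]byte("error output\n"))

	// Consumer side: demultiplex back into separate writers.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &mux); err != nil {
		panic(err)
	}
	fmt.Print("stdout: ", stdout.String())
	fmt.Print("stderr: ", stderr.String())
}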
-func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - displaySize := httputils.BoolValue(r, "size") - - version := httputils.VersionFromContext(ctx) - json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, json) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go deleted file mode 100644 index c1d1e830..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/image.go +++ /dev/null @@ -1,375 +0,0 @@ -package local - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/builder/dockerfile" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/reference" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cname := r.Form.Get("container") - - pause := httputils.BoolValue(r, "pause") - version := httputils.VersionFromContext(ctx) - if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { - pause = true - } - - c, _, _, err := runconfig.DecodeContainerConfig(r.Body) - if err != nil && err != io.EOF { //Do not fail if body is empty. 
- return err - } - if c == nil { - c = &container.Config{} - } - - if !s.daemon.Exists(cname) { - return derr.ErrorCodeNoSuchContainer.WithArgs(cname) - } - - newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) - if err != nil { - return err - } - - commitCfg := &types.ContainerCommitConfig{ - Pause: pause, - Repo: r.Form.Get("repo"), - Tag: r.Form.Get("tag"), - Author: r.Form.Get("author"), - Comment: r.Form.Get("comment"), - Config: newConfig, - MergeConfigs: true, - } - - imgID, err := s.daemon.Commit(cname, commitCfg) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ - ID: string(imgID), - }) -} - -// Creates an image from Pull or from Import -func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - message = r.Form.Get("message") - ) - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } - - var ( - err error - output = ioutils.NewWriteFlusher(w) - ) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if image != "" { //pull - // Special case: "pull -a" may send an image name with a - // trailing :. This is ugly, but let's not break API - // compatibility. - image = strings.TrimSuffix(image, ":") - - var ref reference.Named - ref, err = reference.ParseNamed(image) - if err == nil { - if tag != "" { - // The "tag" could actually be a digest. 
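That disambiguation is a try-parse: attempt the digest parse first, and fall back to treating the value as a tag. Condensed into a helper using the same vendored reference and digest packages (the helper name is ours):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/reference"
)

// refWithTagOrDigest resolves a value that may be either a tag or a digest.
func refWithTagOrDigest(name, tagOrDigest string) (reference.Named, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return nil, err
	}
	// A value like "sha256:..." parses as a digest; anything else is a tag.
	if dgst, err := digest.ParseDigest(tagOrDigest); err == nil {
		return reference.WithDigest(ref, dgst)
	}
	return reference.WithTag(ref, tagOrDigest)
}

func main() {
	ref, err := refWithTagOrDigest("docker.io/library/busybox", "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())
}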
- var dgst digest.Digest - dgst, err = digest.ParseDigest(tag) - if err == nil { - ref, err = reference.WithDigest(ref, dgst) - } else { - ref, err = reference.WithTag(ref, tag) - } - } - if err == nil { - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - err = s.daemon.PullImage(ref, metaHeaders, authConfig, output) - } - } - } else { //import - var newRef reference.Named - if repo != "" { - var err error - newRef, err = reference.ParseNamed(repo) - if err != nil { - return err - } - - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errors.New("cannot import digest reference") - } - - if tag != "" { - newRef, err = reference.WithTag(newRef, tag) - if err != nil { - return err - } - } - } - - src := r.Form.Get("fromSrc") - - // 'err' MUST NOT be defined within this block, we need any error - // generated from the download to be available to the output - // stream processing below - var newConfig *container.Config - newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) - if err != nil { - return err - } - - err = s.daemon.ImportImage(src, newRef, message, r.Body, output, newConfig) - } - if err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - - return nil -} - -func (s *router) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := httputils.ParseForm(r); err != nil { - return err - } - authConfig := &types.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) - } - } - - ref, err := reference.ParseNamed(vars["name"]) - if err != nil { - return err - } - tag := r.Form.Get("tag") - if tag != "" { - // Push by digest is not supported, so only tags are supported. 
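The decoding above implies the client-side construction: JSON-encode the credentials, base64-encode them with the URL-safe alphabet, and send the result as X-Registry-Auth. A sketch (the JSON keys follow the types.AuthConfig tags; the credentials are dummies):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// JSON keys match the tags on types.AuthConfig.
	auth := map[string]string{
		"username":      "someuser",
		"password":      "secret",
		"serveraddress": "registry.example.com",
	}
	raw, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	// Note the URL-safe alphabet, matching base64.URLEncoding on the
	// decoding side in the handler above.
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(raw))
}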
- ref, err = reference.WithTag(ref, tag) - if err != nil { - return err - } - } - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if err := s.daemon.PushImage(ref, metaHeaders, authConfig, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *router) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - var names []string - if name, ok := vars["name"]; ok { - names = []string{name} - } else { - names = r.Form["names"] - } - - if err := s.daemon.ExportImage(names, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *router) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.daemon.LoadImage(r.Body, w) -} - -func (s *router) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - - if strings.TrimSpace(name) == "" { - return fmt.Errorf("image name cannot be blank") - } - - force := httputils.BoolValue(r, "force") - prune := !httputils.BoolValue(r, "noprune") - - list, err := s.daemon.ImageDelete(name, force, prune) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (s *router) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - imageInspect, err := s.daemon.LookupImage(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, imageInspect) -} - -func (s *router) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // FIXME: The filter parameter could just be a match filter - images, err := s.daemon.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, images) -} - -func (s *router) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - name := vars["name"] - history, err := s.daemon.ImageHistory(name) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, history) -} - -func (s *router) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - repo := r.Form.Get("repo") - tag := r.Form.Get("tag") - newTag, err := reference.WithName(repo) - if err != nil { - return err - } - if tag != "" { - if newTag, err = reference.WithTag(newTag, tag); err != nil { - return err - } - } - if err := s.daemon.TagImage(newTag, vars["name"]); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *router) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := 
httputils.ParseForm(r); err != nil { - return err - } - var ( - config *types.AuthConfig - authEncoded = r.Header.Get("X-Registry-Auth") - headers = map[string][]string{} - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it defaults to being empty - config = &types.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - headers[k] = v - } - } - query, err := s.daemon.SearchRegistryForImages(r.Form.Get("term"), config, headers) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, query.Results) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go deleted file mode 100644 index ed07f977..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/local/local.go +++ /dev/null @@ -1,107 +0,0 @@ -package local - -import ( - "github.com/docker/docker/api/server/httputils" - dkrouter "github.com/docker/docker/api/server/router" - "github.com/docker/docker/daemon" -) - -// router is a docker router that talks with the local docker daemon. -type router struct { - daemon *daemon.Daemon - routes []dkrouter.Route -} - -// localRoute defines an individual API route to connect with the docker daemon. -// It implements router.Route. -type localRoute struct { - method string - path string - handler httputils.APIFunc -} - -// Handler returns the APIFunc to let the server wrap it in middlewares -func (l localRoute) Handler() httputils.APIFunc { - return l.handler -} - -// Method returns the http method that the route responds to. -func (l localRoute) Method() string { - return l.method -} - -// Path returns the subpath where the route responds to. -func (l localRoute) Path() string { - return l.path -} - -// NewRoute initializes a new local route for the router. -func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route { - return localRoute{method, path, handler} -} - -// NewGetRoute initializes a new route with the http method GET. -func NewGetRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("GET", path, handler) -} - -// NewPostRoute initializes a new route with the http method POST. -func NewPostRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("POST", path, handler) -} - -// NewPutRoute initializes a new route with the http method PUT. -func NewPutRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("PUT", path, handler) -} - -// NewDeleteRoute initializes a new route with the http method DELETE. -func NewDeleteRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("DELETE", path, handler) -} - -// NewOptionsRoute initializes a new route with the http method OPTIONS. -func NewOptionsRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("OPTIONS", path, handler) -} - -// NewHeadRoute initializes a new route with the http method HEAD. -func NewHeadRoute(path string, handler httputils.APIFunc) dkrouter.Route { - return NewRoute("HEAD", path, handler) -} - -// NewRouter initializes a local router with a new daemon.
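Given these constructors, adding an endpoint is just pairing a method and path with an httputils.APIFunc. A hypothetical route showing the handler signature the helpers expect (the path and handler are ours, not part of the vendored router):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/server/router/local"
	"golang.org/x/net/context"
)

// ping is a hypothetical handler; every APIFunc has this signature.
func ping(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	return httputils.WriteJSON(w, http.StatusOK, map[string]string{"status": "ok"})
}

func main() {
	route := local.NewGetRoute("/debug/ping", ping)
	fmt.Println(route.Method(), route.Path())
}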
-func NewRouter(daemon *daemon.Daemon) dkrouter.Router { - r := &router{ - daemon: daemon, - } - r.initRoutes() - return r -} - -// Routes returns the list of routes registered in the router. -func (r *router) Routes() []dkrouter.Route { - return r.routes -} - -// initRoutes initializes the routes in this router -func (r *router) initRoutes() { - r.routes = []dkrouter.Route{ - // OPTIONS - // GET - NewGetRoute("/images/json", r.getImagesJSON), - NewGetRoute("/images/search", r.getImagesSearch), - NewGetRoute("/images/get", r.getImagesGet), - NewGetRoute("/images/{name:.*}/get", r.getImagesGet), - NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), - NewGetRoute("/images/{name:.*}/json", r.getImagesByName), - // POST - NewPostRoute("/commit", r.postCommit), - NewPostRoute("/images/create", r.postImagesCreate), - NewPostRoute("/images/load", r.postImagesLoad), - NewPostRoute("/images/{name:.*}/push", r.postImagesPush), - NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), - // DELETE - NewDeleteRoute("/images/{name:.*}", r.deleteImages), - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go deleted file mode 100644 index c6ea0adc..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/backend.go +++ /dev/null @@ -1,22 +0,0 @@ -package network - -import ( - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -// Backend is all the methods that need to be implemented to provide -// network specific functionality -type Backend interface { - FindNetwork(idName string) (libnetwork.Network, error) - GetNetwork(idName string, by int) (libnetwork.Network, error) - GetNetworksByID(partialID string) []libnetwork.Network - GetAllNetworks() []libnetwork.Network - CreateNetwork(name, driver string, ipam network.IPAM, - options map[string]string, internal bool) (libnetwork.Network, error) - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - DisconnectContainerFromNetwork(containerName string, - network libnetwork.Network, force bool) error - NetworkControllerEnabled() bool - DeleteNetwork(name string) error -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go deleted file mode 100644 index 31d8d0c5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/filter.go +++ /dev/null @@ -1,110 +0,0 @@ -package network - -import ( - "fmt" - "regexp" - "strings" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types/filters" - "github.com/docker/libnetwork" -) - -type filterHandler func([]libnetwork.Network, string) ([]libnetwork.Network, error) - -var ( - // supportedFilters predefines the supported filter handler functions - supportedFilters = map[string]filterHandler{ - "type": filterNetworkByType, - "name": filterNetworkByName, - "id": filterNetworkByID, - } - - // acceptedFilters is the list of acceptable filter flags - // generated for validation, e.g.
- // acceptedFilters = map[string]bool{ - // "type": true, - // "name": true, - // "id": true, - // } - acceptedFilters = func() map[string]bool { - ret := make(map[string]bool) - for k := range supportedFilters { - ret[k] = true - } - return ret - }() -) - -func filterNetworkByType(nws []libnetwork.Network, netType string) (retNws []libnetwork.Network, err error) { - switch netType { - case "builtin": - for _, nw := range nws { - if runconfig.IsPreDefinedNetwork(nw.Name()) { - retNws = append(retNws, nw) - } - } - case "custom": - for _, nw := range nws { - if !runconfig.IsPreDefinedNetwork(nw.Name()) { - retNws = append(retNws, nw) - } - } - default: - return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) - } - return retNws, nil -} - -func filterNetworkByName(nws []libnetwork.Network, name string) (retNws []libnetwork.Network, err error) { - for _, nw := range nws { - // exact match (fast path) - if nw.Name() == name { - retNws = append(retNws, nw) - continue - } - - // regexp match (slow path) - match, err := regexp.MatchString(name, nw.Name()) - if err != nil || !match { - continue - } else { - retNws = append(retNws, nw) - } - } - return retNws, nil -} - -func filterNetworkByID(nws []libnetwork.Network, id string) (retNws []libnetwork.Network, err error) { - for _, nw := range nws { - if strings.HasPrefix(nw.ID(), id) { - retNws = append(retNws, nw) - } - } - return retNws, nil -} - -// filterNetworks filters the network list according to the user-specified filter -// and returns the chosen networks -func filterNetworks(nws []libnetwork.Network, filter filters.Args) ([]libnetwork.Network, error) { - // if filter is empty, return original network list - if filter.Len() == 0 { - return nws, nil - } - - var displayNet []libnetwork.Network - for fkey, fhandler := range supportedFilters { - errFilter := filter.WalkValues(fkey, func(fval string) error { - passList, err := fhandler(nws, fval) - if err != nil { - return err - } - displayNet = append(displayNet, passList...)
- return nil - }) - if errFilter != nil { - return nil, errFilter - } - } - return displayNet, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go deleted file mode 100644 index 5fcb252c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network.go +++ /dev/null @@ -1,56 +0,0 @@ -package network - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/local" - "github.com/docker/docker/errors" - "golang.org/x/net/context" -) - -// networkRouter is a router to talk with the network controller -type networkRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new network router -func NewRouter(b Backend) router.Router { - r := &networkRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the network controller -func (r *networkRouter) Routes() []router.Route { - return r.routes -} - -func (r *networkRouter) initRoutes() { - r.routes = []router.Route{ - // GET - local.NewGetRoute("/networks", r.controllerEnabledMiddleware(r.getNetworksList)), - local.NewGetRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.getNetwork)), - // POST - local.NewPostRoute("/networks/create", r.controllerEnabledMiddleware(r.postNetworkCreate)), - local.NewPostRoute("/networks/{id:.*}/connect", r.controllerEnabledMiddleware(r.postNetworkConnect)), - local.NewPostRoute("/networks/{id:.*}/disconnect", r.controllerEnabledMiddleware(r.postNetworkDisconnect)), - // DELETE - local.NewDeleteRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.deleteNetwork)), - } -} - -func (r *networkRouter) controllerEnabledMiddleware(handler httputils.APIFunc) httputils.APIFunc { - if r.backend.NetworkControllerEnabled() { - return handler - } - return networkControllerDisabled -} - -func networkControllerDisabled(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return errors.ErrorNetworkControllerNotEnabled.WithArgs() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go deleted file mode 100644 index 85fa88db..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/network/network_routes.go +++ /dev/null @@ -1,265 +0,0 @@ -package network - -import ( - "encoding/json" - "fmt" - "net/http" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/daemon" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - filter := r.Form.Get("filters") - netFilters, err := filters.FromParam(filter) - if err != nil { - return err - } - - if netFilters.Len() != 0 { - if err := netFilters.Validate(acceptedFilters); err != nil { - return err - } - } - - list := []*types.NetworkResource{} - - nwList := n.backend.GetAllNetworks() - displayable, err := 
filterNetworks(nwList, netFilters) - if err != nil { - return err - } - - for _, nw := range displayable { - list = append(list, buildNetworkResource(nw)) - } - - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw)) -} - -func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var create types.NetworkCreate - var warning string - - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&create); err != nil { - return err - } - - if runconfig.IsPreDefinedNetwork(create.Name) { - return httputils.WriteJSON(w, http.StatusForbidden, - fmt.Sprintf("%s is a pre-defined network and cannot be created", create.Name)) - } - - nw, err := n.backend.GetNetwork(create.Name, daemon.NetworkByName) - if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { - return err - } - if nw != nil { - if create.CheckDuplicate { - return libnetwork.NetworkNameError(create.Name) - } - warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) - } - - nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options, create.Internal) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.NetworkCreateResponse{ - ID: nw.ID(), - Warning: warning, - }) -} - -func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var connect types.NetworkConnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - return n.backend.ConnectContainerToNetwork(connect.Container, nw.Name(), connect.EndpointConfig) -} - -func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var disconnect types.NetworkDisconnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force) -} - -func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return n.backend.DeleteNetwork(vars["id"]) -} - -func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { - r := &types.NetworkResource{} - if nw == nil { - return r - } - - r.Name = nw.Name() - r.ID = nw.ID() - r.Scope = nw.Info().Scope() - r.Driver = nw.Type() - r.Options = nw.Info().DriverOptions() - r.Containers = make(map[string]types.EndpointResource) - buildIpamResources(r, nw) - - epl := nw.Endpoints() - 
for _, e := range epl { - ei := e.Info() - if ei == nil { - continue - } - sb := ei.Sandbox() - if sb == nil { - continue - } - - r.Containers[sb.ContainerID()] = buildEndpointResource(e) - } - return r -} - -func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) { - id, opts, ipv4conf, ipv6conf := nw.Info().IpamConfig() - - ipv4Info, ipv6Info := nw.Info().IpamInfo() - - r.IPAM.Driver = id - - r.IPAM.Options = opts - - r.IPAM.Config = []network.IPAMConfig{} - for _, ip4 := range ipv4conf { - if ip4.PreferredPool == "" { - continue - } - iData := network.IPAMConfig{} - iData.Subnet = ip4.PreferredPool - iData.IPRange = ip4.SubPool - iData.Gateway = ip4.Gateway - iData.AuxAddress = ip4.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if len(r.IPAM.Config) == 0 { - for _, ip4Info := range ipv4Info { - iData := network.IPAMConfig{} - iData.Subnet = ip4Info.IPAMData.Pool.String() - iData.Gateway = ip4Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } - - hasIpv6Conf := false - for _, ip6 := range ipv6conf { - if ip6.PreferredPool == "" { - continue - } - hasIpv6Conf = true - iData := network.IPAMConfig{} - iData.Subnet = ip6.PreferredPool - iData.IPRange = ip6.SubPool - iData.Gateway = ip6.Gateway - iData.AuxAddress = ip6.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if !hasIpv6Conf { - for _, ip6Info := range ipv6Info { - iData := network.IPAMConfig{} - iData.Subnet = ip6Info.IPAMData.Pool.String() - iData.Gateway = ip6Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } -} - -func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { - er := types.EndpointResource{} - if e == nil { - return er - } - - er.EndpointID = e.ID() - er.Name = e.Name() - ei := e.Info() - if ei == nil { - return er - } - - if iface := ei.Iface(); iface != nil { - if mac := iface.MacAddress(); mac != nil { - er.MacAddress = mac.String() - } - if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { - er.IPv4Address = ip.String() - } - - if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { - er.IPv6Address = ipv6.String() - } - } - return er -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go deleted file mode 100644 index f3efa82f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/router.go +++ /dev/null @@ -1,18 +0,0 @@ -package router - -import "github.com/docker/docker/api/server/httputils" - -// Router defines an interface to specify a group of routes to add to the docker server. -type Router interface { - Routes() []Route -} - -// Route defines an individual API route in the docker server. -type Route interface { - // Handler returns the raw function to create the http handler. - Handler() httputils.APIFunc - // Method returns the http method that the route responds to. - Method() string - // Path returns the subpath where the route responds to.
- Path() string -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go deleted file mode 100644 index 8a270027..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/backend.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" -) - -// Backend is the methods that need to be implemented to provide -// system specific functionality. -type Backend interface { - SystemInfo() (*types.Info, error) - SystemVersion() types.Version - SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(chan interface{}) - AuthenticateToRegistry(authConfig *types.AuthConfig) (string, error) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go deleted file mode 100644 index 0f46eda2..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system.go +++ /dev/null @@ -1,37 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/local" -) - -// systemRouter is a Router that provides information about -// the Docker system overall. It gathers information about -// host, daemon and container events. -type systemRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new systemRouter -func NewRouter(b Backend) router.Router { - r := &systemRouter{ - backend: b, - } - - r.routes = []router.Route{ - local.NewOptionsRoute("/", optionsHandler), - local.NewGetRoute("/_ping", pingHandler), - local.NewGetRoute("/events", r.getEvents), - local.NewGetRoute("/info", r.getInfo), - local.NewGetRoute("/version", r.getVersion), - local.NewPostRoute("/auth", r.postAuth), - } - - return r -} - -// Routes returns all the API routes dedicated to the docker system.
-func (s *systemRouter) Routes() []router.Route { - return s.routes -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go deleted file mode 100644 index 40ec748a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/system/system_routes.go +++ /dev/null @@ -1,132 +0,0 @@ -package system - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" -) - -func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} - -func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err -} - -func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.backend.SystemInfo() - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info := s.backend.SystemVersion() - info.APIVersion = api.DefaultVersion.String() - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) - if err != nil { - return err - } - until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) - if err != nil { - return err - } - - timer := time.NewTimer(0) - timer.Stop() - if until > 0 || untilNano > 0 { - dur := time.Unix(until, untilNano).Sub(time.Now()) - timer = time.NewTimer(dur) - } - - ef, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - - // This is to ensure that the HTTP status code is sent immediately, - // so that it will not block the receiver. 
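The event loop that follows has a reusable shape: commit and flush the status, replay the buffered backlog, then select over the live channel, the "until" deadline, and client disconnect. Stripped down to plain net/http (the event channel and deadline are supplied by the caller in this sketch):

package main

import (
	"encoding/json"
	"net/http"
	"time"
)

func streamEvents(w http.ResponseWriter, r *http.Request, events <-chan string, deadline <-chan time.Time) {
	w.Header().Set("Content-Type", "application/json")
	// Send the status immediately so the receiver is not left blocking.
	w.WriteHeader(http.StatusOK)
	flusher, _ := w.(http.Flusher)
	if flusher != nil {
		flusher.Flush()
	}

	var closeNotify <-chan bool
	if cn, ok := w.(http.CloseNotifier); ok {
		closeNotify = cn.CloseNotify()
	}

	enc := json.NewEncoder(w)
	for {
		select {
		case ev, ok := <-events:
			if !ok {
				return
			}
			if err := enc.Encode(ev); err != nil {
				return
			}
			if flusher != nil {
				flusher.Flush()
			}
		case <-deadline: // the "until" timer fired
			return
		case <-closeNotify: // client went away
			return
		}
	}
}

func main() {
	// Illustrative wiring: a one-shot buffered channel of JSON strings.
	events := make(chan string, 1)
	events <- `{"status":"start"}`
	close(events)
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		streamEvents(w, r, events, time.After(5*time.Second))
	})
	http.ListenAndServe("127.0.0.1:8080", nil)
}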
- w.WriteHeader(http.StatusOK) - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - enc := json.NewEncoder(output) - - buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) - defer s.backend.UnsubscribeFromEvents(l) - - for _, ev := range buffered { - if err := enc.Encode(ev); err != nil { - return err - } - } - - var closeNotify <-chan bool - if closeNotifier, ok := w.(http.CloseNotifier); ok { - closeNotify = closeNotifier.CloseNotify() - } - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - logrus.Warnf("unexpected event message: %q", ev) - continue - } - if err := enc.Encode(jev); err != nil { - return err - } - case <-timer.C: - return nil - case <-closeNotify: - logrus.Debug("Client disconnected, stop sending events") - return nil - } - } -} - -func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *types.AuthConfig - err := json.NewDecoder(r.Body).Decode(&config) - r.Body.Close() - if err != nil { - return err - } - status, err := s.backend.AuthenticateToRegistry(config) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ - Status: status, - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go deleted file mode 100644 index ede5dc4d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/backend.go +++ /dev/null @@ -1,16 +0,0 @@ -package volume - -import ( - // TODO return types need to be refactored into pkg - "github.com/docker/engine-api/types" -) - -// Backend is the methods that need to be implemented to provide -// volume specific functionality -type Backend interface { - Volumes(filter string) ([]*types.Volume, []string, error) - VolumeInspect(name string) (*types.Volume, error) - VolumeCreate(name, driverName string, - opts map[string]string) (*types.Volume, error) - VolumeRm(name string) error -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go deleted file mode 100644 index 8bd5c129..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume.go +++ /dev/null @@ -1,38 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/local" -) - -// volumeRouter is a router to talk with the volumes controller -type volumeRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new volumeRouter -func NewRouter(b Backend) router.Router { - r := &volumeRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the volumes controller -func (r *volumeRouter) Routes() []router.Route { - return r.routes -} - -func (r *volumeRouter) initRoutes() { - r.routes = []router.Route{ - // GET - local.NewGetRoute("/volumes", r.getVolumesList), - local.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), - // POST - local.NewPostRoute("/volumes/create", r.postVolumesCreate), - // DELETE - local.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go
b/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go deleted file mode 100644 index feef77cb..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router/volume/volume_routes.go +++ /dev/null @@ -1,66 +0,0 @@ -package volume - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) -} - -func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volume, err := v.backend.VolumeInspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, volume) -} - -func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - var req types.VolumeCreateRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - - volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusCreated, volume) -} - -func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := v.backend.VolumeRm(vars["name"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go deleted file mode 100644 index b5f1d06d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/router_swapper.go +++ /dev/null @@ -1,30 +0,0 @@ -package server - -import ( - "net/http" - "sync" - - "github.com/gorilla/mux" -) - -// routerSwapper is an http.Handler that allow you to swap -// mux routers. -type routerSwapper struct { - mu sync.Mutex - router *mux.Router -} - -// Swap changes the old router with the new one. -func (rs *routerSwapper) Swap(newRouter *mux.Router) { - rs.mu.Lock() - rs.router = newRouter - rs.mu.Unlock() -} - -// ServeHTTP makes the routerSwapper to implement the http.Handler interface. 
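The routerSwapper's mutex above is held only long enough to read or write a single pointer. A lock-free variant of the same idea, sketched with `sync/atomic` (names are illustrative; the wrapper struct is needed because `atomic.Value` panics if successive `Store` calls use different concrete types):

    package sketch

    import (
        "net/http"
        "sync/atomic"
    )

    type hbox struct{ h http.Handler } // fixed concrete type for atomic.Value

    // handlerSwapper swaps the live handler without taking a lock on the
    // request path.
    type handlerSwapper struct {
        v atomic.Value // always holds an hbox
    }

    func (s *handlerSwapper) Swap(h http.Handler) { s.v.Store(hbox{h}) }

    func (s *handlerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        box, ok := s.v.Load().(hbox)
        if !ok { // nothing stored yet
            http.Error(w, "router not ready", http.StatusServiceUnavailable)
            return
        }
        box.h.ServeHTTP(w, r)
    }

Either form gives the guarantee the daemon's Reload path needs: requests already dispatched keep the old router, while new requests see the new one.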
-func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { - rs.mu.Lock() - router := rs.router - rs.mu.Unlock() - router.ServeHTTP(w, r) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go deleted file mode 100644 index aa7905b4..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go +++ /dev/null @@ -1,243 +0,0 @@ -package server - -import ( - "crypto/tls" - "net" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/build" - "github.com/docker/docker/api/server/router/container" - "github.com/docker/docker/api/server/router/local" - "github.com/docker/docker/api/server/router/network" - "github.com/docker/docker/api/server/router/system" - "github.com/docker/docker/api/server/router/volume" - "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/utils" - "github.com/docker/go-connections/sockets" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// versionMatcher defines a variable matcher to be parsed by the router -// when a request is about to be served. -const versionMatcher = "/v{version:[0-9.]+}" - -// Config provides the configuration for the API server -type Config struct { - Logging bool - EnableCors bool - CorsHeaders string - AuthorizationPluginNames []string - Version string - SocketGroup string - TLSConfig *tls.Config - Addrs []Addr -} - -// Server contains instance details for the server -type Server struct { - cfg *Config - servers []*HTTPServer - routers []router.Router - authZPlugins []authorization.Plugin - routerSwapper *routerSwapper -} - -// Addr contains string representation of address and its protocol (tcp, unix...). -type Addr struct { - Proto string - Addr string -} - -// New returns a new instance of the server based on the specified configuration. -// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). -func New(cfg *Config) (*Server, error) { - s := &Server{ - cfg: cfg, - } - for _, addr := range cfg.Addrs { - srv, err := s.newServer(addr.Proto, addr.Addr) - if err != nil { - return nil, err - } - logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr) - s.servers = append(s.servers, srv...) - } - return s, nil -} - -// Close closes servers and thus stop receiving requests -func (s *Server) Close() { - for _, srv := range s.servers { - if err := srv.Close(); err != nil { - logrus.Error(err) - } - } -} - -// serveAPI loops through all initialized servers and spawns goroutine -// with Server method for each. It sets createMux() as Handler also. -func (s *Server) serveAPI() error { - s.initRouterSwapper() - - var chErrors = make(chan error, len(s.servers)) - for _, srv := range s.servers { - srv.srv.Handler = s.routerSwapper - go func(srv *HTTPServer) { - var err error - logrus.Infof("API listen on %s", srv.l.Addr()) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }(srv) - } - - for i := 0; i < len(s.servers); i++ { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -// HTTPServer contains an instance of http server and the listener. 
-// srv *http.Server, contains configuration to create a http server and a mux router with all api end points. -// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. -type HTTPServer struct { - srv *http.Server - l net.Listener -} - -// Serve starts listening for inbound requests. -func (s *HTTPServer) Serve() error { - return s.srv.Serve(s.l) -} - -// Close closes the HTTPServer from listening for the inbound requests. -func (s *HTTPServer) Close() error { - return s.l.Close() -} - -func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { - logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) - w.Header().Add("Access-Control-Allow-Origin", corsHeaders) - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") - w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") -} - -func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { - if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert { - logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } - if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig); err != nil { - return nil, err - } - if err := allocateDaemonPort(addr); err != nil { - return nil, err - } - return -} - -func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // log the handler call - logrus.Debugf("Calling %s %s", r.Method, r.URL.Path) - - // Define the context that we'll pass around to share info - // like the docker-request-id. - // - // The 'context' will be used for global data that should - // apply to all requests. Data that is specific to the - // immediate function being called should still be passed - // as 'args' on the function call. - ctx := context.Background() - handlerFunc := s.handleWithGlobalMiddlewares(handler) - - vars := mux.Vars(r) - if vars == nil { - vars = make(map[string]string) - } - - if err := handlerFunc(ctx, w, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err)) - httputils.WriteError(w, err) - } - } -} - -// InitRouters initializes a list of routers for the server. -func (s *Server) InitRouters(d *daemon.Daemon) { - s.addRouter(container.NewRouter(d)) - s.addRouter(local.NewRouter(d)) - s.addRouter(network.NewRouter(d)) - s.addRouter(system.NewRouter(d)) - s.addRouter(volume.NewRouter(d)) - s.addRouter(build.NewRouter(d)) -} - -// addRouter adds a new router to the server. -func (s *Server) addRouter(r router.Router) { - s.routers = append(s.routers, r) -} - -// createMux initializes the main router the server uses. -func (s *Server) createMux() *mux.Router { - m := mux.NewRouter() - if utils.IsDebugEnabled() { - profilerSetup(m, "/debug/") - } - - logrus.Debugf("Registering routers") - for _, apiRouter := range s.routers { - for _, r := range apiRouter.Routes() { - f := s.makeHTTPHandler(r.Handler()) - - logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) - m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) - m.Path(r.Path()).Methods(r.Method()).Handler(f) - } - } - - return m -} - -// Wait blocks the server goroutine until it exits. -// It sends an error message if there is any error during -// the API execution. 
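createMux above registers every route twice, once under the version prefix and once bare, so clients may request either `/v1.22/containers/json` or `/containers/json`. A sketch of that double registration in isolation (the helper name is illustrative; the matcher is the `versionMatcher` constant defined above):

    package sketch

    import (
        "net/http"

        "github.com/gorilla/mux"
    )

    const versionMatcher = "/v{version:[0-9.]+}"

    // registerBoth mounts one handler on both the versioned and the bare
    // path. A handler can later recover the requested API version, if
    // any, with mux.Vars(r)["version"].
    func registerBoth(m *mux.Router, method, path string, h http.Handler) {
        m.Path(versionMatcher + path).Methods(method).Handler(h)
        m.Path(path).Methods(method).Handler(h)
    }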
-func (s *Server) Wait(waitChan chan error) { - if err := s.serveAPI(); err != nil { - logrus.Errorf("ServeAPI error: %v", err) - waitChan <- err - return - } - waitChan <- nil -} - -func (s *Server) initRouterSwapper() { - s.routerSwapper = &routerSwapper{ - router: s.createMux(), - } -} - -// Reload reads configuration changes and modifies the -// server according to those changes. -// Currently, only the --debug configuration is taken into account. -func (s *Server) Reload(config *daemon.Config) { - debugEnabled := utils.IsDebugEnabled() - switch { - case debugEnabled && !config.Debug: // disable debug - utils.DisableDebug() - s.routerSwapper.Swap(s.createMux()) - case config.Debug && !debugEnabled: // enable debug - utils.EnableDebug() - s.routerSwapper.Swap(s.createMux()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go deleted file mode 100644 index a4fc6395..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unix.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build freebsd linux - -package server - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/go-connections/sockets" - "github.com/docker/libnetwork/portallocator" - - systemdActivation "github.com/coreos/go-systemd/activation" -) - -// newServer sets up the required HTTPServers and does protocol specific checking. -// newServer does not set any muxers, you should set it later to Handler field -func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { - var ( - err error - ls []net.Listener - ) - switch proto { - case "fd": - ls, err = listenFD(addr, s.cfg.TLSConfig) - if err != nil { - return nil, err - } - case "tcp": - l, err := s.initTCPSocket(addr) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("Invalid protocol format: %q", proto) - } - var res []*HTTPServer - for _, l := range ls { - res = append(res, &HTTPServer{ - &http.Server{ - Addr: addr, - }, - l, - }) - } - return res, nil -} - -func allocateDaemonPort(addr string) error { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - intPort, err := strconv.Atoi(port) - if err != nil { - return err - } - - var hostIPs []net.IP - if parsedIP := net.ParseIP(host); parsedIP != nil { - hostIPs = append(hostIPs, parsedIP) - } else if hostIPs, err = net.LookupIP(host); err != nil { - return fmt.Errorf("failed to lookup %s address in host specification", host) - } - - pa := portallocator.Get() - for _, hostIP := range hostIPs { - if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { - return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) - } - } - return nil -} - -// listenFD returns the specified socket activated files as a slice of -// net.Listeners or all of the activated files if "*" is given. 
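For the `fd` protocol handled in server_unix.go below, systemd passes activated sockets starting at file descriptor 3, so an address like `fd://4` selects index 1 of the listener slice; that is where the `fdNum - 3` arithmetic in listenFD comes from. The index math on its own (a sketch; the constant and helper names are illustrative):

    package sketch

    // listenFdsStart mirrors systemd's SD_LISTEN_FDS_START: the first
    // activated socket is always inherited as file descriptor 3.
    const listenFdsStart = 3

    // activatedIndex maps a requested fd number onto an index into the
    // slice of listeners returned by the systemd activation package.
    func activatedIndex(fdNum, numListeners int) (int, bool) {
        idx := fdNum - listenFdsStart
        if idx < 0 || idx >= numListeners {
            return 0, false // that fd was not passed in by systemd
        }
        return idx, true
    }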
-func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { - var ( - err error - listeners []net.Listener - ) - // socket activation - if tlsConfig != nil { - listeners, err = systemdActivation.TLSListeners(false, tlsConfig) - } else { - listeners, err = systemdActivation.Listeners(false) - } - if err != nil { - return nil, err - } - - if len(listeners) == 0 { - return nil, fmt.Errorf("No sockets found") - } - - // default to all fds just like unix:// and tcp:// - if addr == "" || addr == "*" { - return listeners, nil - } - - fdNum, err := strconv.Atoi(addr) - if err != nil { - return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) - } - fdOffset := fdNum - 3 - if len(listeners) < int(fdOffset)+1 { - return nil, fmt.Errorf("Too few socket activated files passed in") - } - if listeners[fdOffset] == nil { - return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) - } - for i, ls := range listeners { - if i == fdOffset || ls == nil { - continue - } - if err := ls.Close(); err != nil { - logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) - } - } - return []net.Listener{listeners[fdOffset]}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go deleted file mode 100644 index 826dd2e0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package server - -import ( - "errors" - "net" - "net/http" -) - -// NewServer sets up the required Server and does protocol specific checking. -func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { - var ( - ls []net.Listener - ) - switch proto { - case "tcp": - l, err := s.initTCPSocket(addr) - if err != nil { - return nil, err - } - ls = append(ls, l) - - default: - return nil, errors.New("Invalid protocol format. Windows only supports tcp.") - } - - var res []*HTTPServer - for _, l := range ls { - res = append(res, &HTTPServer{ - &http.Server{ - Addr: addr, - }, - l, - }) - } - return res, nil - -} - -func allocateDaemonPort(addr string) error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go deleted file mode 100644 index 51054fa6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go +++ /dev/null @@ -1,557 +0,0 @@ -// +build linux - -/* - -aufs driver directory structure - - . - ├── layers // Metadata of layers - │ ├── 1 - │ ├── 2 - │ └── 3 - ├── diff // Content of the layer - │ ├── 1 // Contains layers that need to be mounted for the id - │ ├── 2 - │ └── 3 - └── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "strings" - "sync" - "syscall" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/idtools" - mountpk "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - - "github.com/opencontainers/runc/libcontainer/label" -) - -var ( - // ErrAufsNotSupported is returned if aufs is not supported by the host. 
- ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") - incompatibleFsMagic = []graphdriver.FsMagic{ - graphdriver.FsMagicBtrfs, - graphdriver.FsMagicAufs, - } - backingFs = "" - - enableDirpermLock sync.Once - enableDirperm bool -) - -func init() { - graphdriver.Register("aufs", Init) -} - -type data struct { - referenceCount int - path string -} - -// Driver contains information about the filesystem mounted. -// root of the filesystem -// sync.Mutex to protect against concurrent modifications -// active maps mount id to the count -type Driver struct { - root string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - sync.Mutex // Protects concurrent modification to active - active map[string]*data -} - -// Init returns a new AUFS driver. -// An error is returned if AUFS is not supported. -func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, graphdriver.ErrNotSupported - } - - fsMagic, err := graphdriver.GetFSMagic(root) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - for _, magic := range incompatibleFsMagic { - if fsMagic == magic { - return nil, graphdriver.ErrIncompatibleFS - } - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - active: make(map[string]*data), - uidMaps: uidMaps, - gidMaps: gidMaps, - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - if err := mountpk.MakePrivate(root); err != nil { - return nil, err - } - - // Populate the dir structure - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { - return nil, err - } - } - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a *Driver) rootPath() string { - return a.root -} - -func (*Driver) String() string { - return "aufs" -} - -// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
-func (a *Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, - } -} - -// GetMetadata not implemented -func (a *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Exists returns true if the given id is registered with -// this driver -func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// Create three folders for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent, mountLabel string) error { - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIds(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - a.active[id] = &data{} - return nil -} - -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) - if err != nil { - return err - } - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { - return err - } - } - return nil -} - -// Remove will unmount and remove the given id. -func (a *Driver) Remove(id string) error { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - m := a.active[id] - if m != nil { - if m.referenceCount > 0 { - return nil - } - // Make sure the dir is umounted first - if err := a.unmount(m); err != nil { - return err - } - } - tmpDirs := []string{ - "mnt", - "diff", - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. - for _, p := range tmpDirs { - realPath := path.Join(a.rootPath(), p, id) - tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) - if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { - return err - } - defer os.RemoveAll(tmpPath) - } - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get returns the rootfs path for the id. 
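The rename-then-delete in Remove above is worth calling out: the rename is atomic, so concurrent lookups stop finding the layer immediately, while the slow recursive delete of a potentially large diff tree happens afterwards. The pattern in isolation (a sketch, with error handling trimmed to the essentials):

    package sketch

    import (
        "fmt"
        "os"
    )

    // removeAtomically hides dir from lookups in one atomic rename, then
    // reclaims the disk space at leisure.
    func removeAtomically(dir string) error {
        tmp := fmt.Sprintf("%s-removing", dir)
        if err := os.Rename(dir, tmp); err != nil && !os.IsNotExist(err) {
            return err
        }
        return os.RemoveAll(tmp)
    }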
-// This will mount the dir at it's given path -func (a *Driver) Get(id, mountLabel string) (string, error) { - ids, err := getParentIds(a.rootPath(), id) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - ids = []string{} - } - - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - m := a.active[id] - if m == nil { - m = &data{} - a.active[id] = m - } - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - m.path = path.Join(a.rootPath(), "diff", id) - if len(ids) > 0 { - m.path = path.Join(a.rootPath(), "mnt", id) - if m.referenceCount == 0 { - if err := a.mount(id, m, mountLabel); err != nil { - return "", err - } - } - } - m.referenceCount++ - return m.path, nil -} - -// Put unmounts and updates list of active mounts. -func (a *Driver) Put(id string) error { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - m := a.active[id] - if m == nil { - // but it might be still here - if a.Exists(id) { - path := path.Join(a.rootPath(), "mnt", id) - err := Unmount(path) - if err != nil { - logrus.Debugf("Failed to unmount %s aufs: %v", id, err) - } - } - return nil - } - if count := m.referenceCount; count > 1 { - m.referenceCount = count - 1 - } else { - ids, _ := getParentIds(a.rootPath(), id) - // We only mounted if there are any parents - if ids != nil && len(ids) > 0 { - a.unmount(m) - } - delete(a.active, id) - } - return nil -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent string) (archive.Archive, error) { - // AUFS doesn't need the parent layer to produce a diff. - return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -// DiffPath returns path to the directory that contains files for the layer -// differences. Used for direct access for tar-split. -func (a *Driver) DiffPath(id string) (string, func() error, error) { - return path.Join(a.rootPath(), "diff", id), func() error { return nil }, nil -} - -func (a *Driver) applyDiff(id string, diff archive.Reader) error { - return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (a *Driver) DiffSize(id, parent string) (size int64, err error) { - // AUFS doesn't need the parent layer to calculate the diff size. - return directory.Size(path.Join(a.rootPath(), "diff", id)) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - // AUFS doesn't need the parent id to apply the diff. - if err = a.applyDiff(id, diff); err != nil { - return - } - - return a.DiffSize(id, parent) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. 
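Get and Put above implement a classic reference count around the mount: the first Get mounts, the last Put unmounts, and everything in between only adjusts a counter under the driver lock. Reduced to its skeleton (a sketch; the callback parameters stand in for the driver's aufs mount and unmount):

    package sketch

    import "sync"

    // refCounter tracks how many callers hold each id's mount.
    type refCounter struct {
        mu     sync.Mutex
        counts map[string]int
    }

    func newRefCounter() *refCounter {
        return &refCounter{counts: make(map[string]int)}
    }

    // Get mounts on the first reference and bumps the count otherwise.
    func (c *refCounter) Get(id string, mount func() error) error {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.counts[id] == 0 {
            if err := mount(); err != nil {
                return err
            }
        }
        c.counts[id]++
        return nil
    }

    // Put unmounts when the last reference is released.
    func (c *refCounter) Put(id string, unmount func() error) error {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.counts[id] <= 1 {
            delete(c.counts, id)
            return unmount()
        }
        c.counts[id]--
        return nil
    }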
-func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { - // AUFS doesn't have snapshots, so we need to get changes from all parent - // layers. - layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIds(a.rootPath(), id) - if err != nil { - return nil, err - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string, m *data, mountLabel string) error { - // If the id is mounted or we get an error return - if mounted, err := a.mounted(m); err != nil || mounted { - return err - } - - var ( - target = m.path - rw = path.Join(a.rootPath(), "diff", id) - ) - - layers, err := a.getParentLayerPaths(id) - if err != nil { - return err - } - - if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) - } - return nil -} - -func (a *Driver) unmount(m *data) error { - if mounted, err := a.mounted(m); err != nil || !mounted { - return err - } - return Unmount(m.path) -} - -func (a *Driver) mounted(m *data) (bool, error) { - return mountpk.Mounted(m.path) -} - -// Cleanup aufs and unmount all mountpoints -func (a *Driver) Cleanup() error { - for id, m := range a.active { - if err := a.unmount(m); err != nil { - logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) - } - } - return mountpk.Unmount(a.root) -} - -func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - // Mount options are clipped to page size(4096 bytes). If there are more - // layers then these are remounted individually using append. - - offset := 54 - if useDirperm() { - offset += len("dirperm1") - } - b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel - bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - - firstMount := true - i := 0 - - for { - for ; i < len(ro); i++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[i]) - - if firstMount { - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } else { - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { - return - } - } - } - - if firstMount { - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - firstMount = false - } - - if i == len(ro) { - break - } - } - - return -} - -// useDirperm checks dirperm1 mount option can be used with the current -// version of aufs. 
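aufsMount above works around a real kernel limitation: mount(2) data is capped at roughly one page, so only as many `=ro+wh` branches as fit are passed on the initial mount and the remainder are added one at a time via `append:` remounts. The batching logic on its own (a sketch; `reserved` stands in for the label and xino overhead the driver subtracts from the budget):

    package sketch

    import (
        "fmt"
        "syscall"
    )

    // branchBatches splits aufs branches into the first-mount option
    // string and the individual "append:" payloads for later remounts.
    func branchBatches(rw string, ro []string, reserved int) (first string, appends []string) {
        budget := syscall.Getpagesize() - reserved
        first = fmt.Sprintf("br:%s=rw", rw)
        i := 0
        for ; i < len(ro); i++ {
            layer := fmt.Sprintf(":%s=ro+wh", ro[i])
            if len(first)+len(layer) > budget {
                break // page budget exhausted; remount the rest
            }
            first += layer
        }
        for ; i < len(ro); i++ {
            appends = append(appends, fmt.Sprintf("append:%s=ro+wh", ro[i]))
        }
        return first, appends
    }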
-func useDirperm() bool { - enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "docker-aufs-base") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(base) - - union, err := ioutil.TempDir("", "docker-aufs-union") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(union) - - opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) - if err := mount("none", union, "aufs", 0, opts); err != nil { - return - } - enableDirperm = true - if err := Unmount(union); err != nil { - logrus.Errorf("error checking dirperm1: failed to unmount %v", err) - } - }) - return enableDirperm -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go deleted file mode 100644 index 08f1ffc0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux - -package aufs - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIds(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go deleted file mode 100644 index d7e9bf9f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux - -package aufs - -import ( - "os/exec" - "syscall" - - "github.com/Sirupsen/logrus" -) - -// Unmount the target specified. 
-func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - logrus.Errorf("Couldn't run auplink before unmount: %s", err) - } - if err := syscall.Unmount(target, 0); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go deleted file mode 100644 index 8062bae4..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package aufs - -import "syscall" - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go deleted file mode 100644 index d030b066..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package aufs - -import "errors" - -// MsRemount declared to specify a non-linux system mount. -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on this platform") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go deleted file mode 100644 index 86aa631b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build linux - -package btrfs - -/* -#include <stdlib.h> -#include <dirent.h> -#include <btrfs/ioctl.h> -#include <btrfs/ctree.h> -*/ -import "C" - -import ( - "fmt" - "os" - "path" - "path/filepath" - "syscall" - "unsafe" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/label" -) - -func init() { - graphdriver.Register("btrfs", Init) -} - -// Init returns a new BTRFS driver. -// An error is returned if BTRFS is not supported. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - rootdir := path.Dir(home) - - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return nil, err - } - - if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { - return nil, graphdriver.ErrPrerequisites - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - driver := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// String prints the name of the driver (btrfs). -func (d *Driver) String() string { - return "btrfs" -} - -// Status returns current driver information in a two dimensional string array.
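Init above refuses to run unless the backing filesystem really is btrfs, by comparing the statfs magic number. Outside the graphdriver package the same check looks like this (a sketch; the constant is BTRFS_SUPER_MAGIC from the kernel headers):

    package sketch

    import "syscall"

    const btrfsSuperMagic = 0x9123683e // BTRFS_SUPER_MAGIC

    // isBtrfs stats the filesystem backing dir and compares its magic
    // number against btrfs's.
    func isBtrfs(dir string) (bool, error) {
        var buf syscall.Statfs_t
        if err := syscall.Statfs(dir, &buf); err != nil {
            return false, err
        }
        return int64(buf.Type) == btrfsSuperMagic, nil
    }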
-// Output contains "Build Version" and "Library Version" of the btrfs libraries used. -// Version information can be used to check compatibility with your kernel. -func (d *Driver) Status() [][2]string { - status := [][2]string{} - if bv := btrfsBuildVersion(); bv != "-" { - status = append(status, [2]string{"Build Version", bv}) - } - if lv := btrfsLibVersion(); lv != -1 { - status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) - } - return status -} - -// GetMetadata returns empty metadata for this driver. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup unmounts the home directory. -func (d *Driver) Cleanup() error { - return mount.Unmount(d.home) -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) - if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func isSubvolume(p string) (bool, error) { - var bufStat syscall.Stat_t - if err := syscall.Lstat(p, &bufStat); err != nil { - return false, err - } - - // return true if it is a btrfs subvolume - return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil -} - -func subvolDelete(dirpath, name string) error { - dir, err := openDir(dirpath) - if err != nil { - return err - } - defer closeDir(dir) - fullPath := path.Join(dirpath, name) - - var args C.struct_btrfs_ioctl_vol_args - - // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) && p != fullPath { - // missing most likely because the path was a subvolume that got removed in the previous iteration - // since it's gone anyway, we don't care - return nil - } - return fmt.Errorf("error walking subvolumes: %v", err) - } - // we want to check children only so skip itself - // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { - sv, err := isSubvolume(p) - if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) - } - if sv { - if err := subvolDelete(path.Dir(p), f.Name()); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of 
parent (%s): %v", p, dirpath, err) - } - } - } - return nil - } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) - } - - // all subvolumes have been removed - // now remove the one originally passed in - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) - } - return nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirID(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -// Create the filesystem with given id. -func (d *Driver) Create(id, parent, mountLabel string) error { - subvolumes := path.Join(d.home, "subvolumes") - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir, err := d.Get(parent, "") - if err != nil { - return err - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - - // if we have a remapped root (user namespaces enabled), change the created snapshot - // dir ownership to match - if rootUID != 0 || rootGID != 0 { - if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { - return err - } - } - - return label.Relabel(path.Join(subvolumes, id), mountLabel, false) -} - -// Remove the filesystem with given id. -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { - return err - } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { - return err - } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get the requested filesystem id. -func (d *Driver) Get(id, mountLabel string) (string, error) { - dir := d.subvolumesDirID(id) - st, err := os.Stat(dir) - if err != nil { - return "", err - } - - if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - - return dir, nil -} - -// Put is not implemented for BTRFS as there is no cleanup required for the id. -func (d *Driver) Put(id string) error { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. - return nil -} - -// Exists checks if the id exists in the filesystem. 
-func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) - return err == nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go deleted file mode 100644 index f0708888..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !cgo - -package btrfs diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go deleted file mode 100644 index 73d90cdd..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux,!btrfs_noversion - -package btrfs - -/* -#include <btrfs/version.h> - -// around version 3.16, they did not define lib version yet -#ifndef BTRFS_LIB_VERSION -#define BTRFS_LIB_VERSION -1 -#endif - -// upstream had removed it, but now it will be coming back -#ifndef BTRFS_BUILD_VERSION -#define BTRFS_BUILD_VERSION "-" -#endif -*/ -import "C" - -func btrfsBuildVersion() string { - return string(C.BTRFS_BUILD_VERSION) -} - -func btrfsLibVersion() int { - return int(C.BTRFS_LIB_VERSION) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go deleted file mode 100644 index f802fbc6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build linux,btrfs_noversion - -package btrfs - -// TODO(vbatts) remove this work-around once supported linux distros are on -// btrfs utilities of >= 3.16.1 - -func btrfsBuildVersion() string { - return "-" -} - -func btrfsLibVersion() int { - return -1 -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md deleted file mode 100644 index fa9a501c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md +++ /dev/null @@ -1,97 +0,0 @@ -## devicemapper - a storage backend based on Device Mapper - -### Theory of operation - -The device mapper graphdriver uses the device mapper thin provisioning -module (dm-thinp) to implement CoW snapshots. The preferred model is -to have a thin pool reserved outside of Docker and passed to the -daemon via the `--storage-opt dm.thinpooldev` option. - -As a fallback if no thin pool is provided, loopback files will be -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Docker daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -In loopback, a thin pool is created at `/var/lib/docker/devicemapper` -(devicemapper graph location) based on two block devices, one for -data and one for metadata. By default these block devices are created -automatically by using loopback mounts of automatically created sparse -files. - -The default loopback files used are -`/var/lib/docker/devicemapper/devicemapper/data` and -`/var/lib/docker/devicemapper/devicemapper/metadata`.
Additional metadata -required to map from docker entities to the corresponding devicemapper -volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` -file (encoded as Json). - -In order to support multiple devicemapper graphs on a system, the thin -pool will be named something like: `docker-0:33-19478248-pool`, where -the `0:33` part is the minor/major device nr and `19478248` is the -inode number of the `/var/lib/docker/devicemapper` directory. - -On the thin pool, docker automatically creates a base thin device, -called something like `docker-0:33-19478248-base` of a fixed -size. This is automatically formatted with an empty filesystem on -creation. This device is the base of all docker images and -containers. All base images are snapshots of this device and those -images are then in turn used as snapshots for other images and -eventually containers. - -### Information on `docker info` - -As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver -will display something like: - - $ sudo docker info - [...] - Storage Driver: devicemapper - Pool Name: docker-253:1-17538953-pool - Pool Blocksize: 65.54 kB - Base Device Size: 107.4 GB - Data file: /dev/loop4 - Metadata file: /dev/loop4 - Data Space Used: 2.536 GB - Data Space Total: 107.4 GB - Data Space Available: 104.8 GB - Metadata Space Used: 7.93 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.14 GB - Udev Sync Supported: true - Data loop file: /home/docker/devicemapper/devicemapper/data - Metadata loop file: /home/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.82-git (2013-10-04) - [...] - -#### status items - -Each item in the indented section under `Storage Driver: devicemapper` are -status information about the driver. - * `Pool Name` name of the devicemapper pool for this driver. - * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. - * `Base Device Size` tells the maximum size of a container and image - * `Data file` blockdevice file used for the devicemapper data - * `Metadata file` blockdevice file used for the devicemapper metadata - * `Data Space Used` tells how much of `Data file` is currently used - * `Data Space Total` tells max size the `Data file` - * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. - * `Metadata Space Used` tells how much of `Metadata file` is currently used - * `Metadata Space Total` tells max size the `Metadata file` - * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. - * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. - * `Data loop file` file attached to `Data file`, if loopback device is used - * `Metadata loop file` file attached to `Metadata file`, if loopback device is used - * `Library Version` from the libdevmapper used - -### About the devicemapper options - -The devicemapper backend supports some options that you can specify -when starting the docker daemon using the `--storage-opt` flags. -This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. 
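The pool-name convention described above can be reproduced directly: the prefix combines the device number of the filesystem holding the graph directory with that directory's inode. A sketch (the helper name is illustrative, and the major/minor extraction follows the traditional Linux dev_t bit layout):

    package sketch

    import (
        "fmt"
        "syscall"
    )

    // poolPrefix derives a "docker-0:33-19478248" style prefix from the
    // graph directory, so multiple graphs on one host get distinct pools.
    func poolPrefix(root string) (string, error) {
        var st syscall.Stat_t
        if err := syscall.Stat(root, &st); err != nil {
            return "", err
        }
        dev := uint64(st.Dev)
        major := (dev >> 8) & 0xfff                   // traditional encoding
        minor := (dev & 0xff) | ((dev >> 12) & 0xfff00)
        return fmt.Sprintf("docker-%d:%d-%d", major, minor, st.Ino), nil
    }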
- -These options are currently documented both in [the man -page](../../../man/docker.1.md) and in [the online -documentation](https://docs.docker.com/reference/commandline/daemon/#docker- -execdriver-option). If you add an options, update both the `man` page and the -documentation. diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go deleted file mode 100644 index d8522349..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go +++ /dev/null @@ -1,2530 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bufio" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/loopback" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/go-units" - - "github.com/opencontainers/runc/libcontainer/label" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - // We retry device removal so many a times that even error messages - // will fill up console during normal operation. So only log Fatal - // messages by default. - logLevel = devicemapper.LogLevelFatal - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false -) - -const deviceSetMetaFile string = "deviceset-metadata" -const transactionMetaFile string = "transaction-metadata" - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - mountCount int - mountPath string - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. 
- lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. - Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. -type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. - BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint -} - -// Structure used to export image/container metadata in docker inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. - Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. - TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. 
- SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. - HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = "base" - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := os.Stat(filename) - return err == nil -} - -// ensureImage creates a sparse file of <size> bytes at the path -// <root>/devicemapper/<name>. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path.
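ensureImage, whose body follows, relies on the fact that truncating a fresh file allocates no blocks: the default 100 GB data file is sparse and occupies almost no disk space until the thin pool actually writes to it. The core of the trick (a sketch):

    package sketch

    import "os"

    // createSparse makes a file that reports the requested size while
    // consuming essentially no disk blocks until data is written.
    func createSparse(path string, size int64) error {
        f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
        if err != nil {
            return err
        }
        defer f.Close()
        return f.Truncate(size) // extends the file without allocating blocks
    }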
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) - } - } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - mask = ^(1 << uint(i)) - 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask -} - -func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { - var mask byte - i := deviceID % 8 - mask = (1 << uint(i)) - if (devices.deviceIDMap[deviceID/8] & mask) != 0 { - return false - } - return true -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { - info := devices.Devices[hash] - if info == nil { - info = devices.loadMetadata(hash) - if info == nil { - return nil, fmt.Errorf("devmapper: Unknown device %s", hash) - } - - devices.Devices[hash] = info - } - return info, nil -} - -func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) - return info, err -} - -// This function relies on the device hash map having been loaded in advance. -// Should be called with devices.Lock() held. -func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debugf("devmapper: constructDeviceIDMap()") - defer logrus.Debugf("devmapper: constructDeviceIDMap() END") - - for _, info := range devices.Devices { - devices.markDeviceIDUsed(info.DeviceID) - logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) - } -} - -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { - - // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if strings.HasPrefix(finfo.Name(), ".") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if finfo.Name() == deviceSetMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if finfo.Name() == transactionMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - logrus.Debugf("devmapper: Loading data for file %s", path) - - hash := finfo.Name() - if hash == "base" { - hash = "" - } - - // Also include deleted devices, as the cleanup logic will go through - // them and delete any devices still marked as deleted. - if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) - } - - return nil -} - -func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debugf("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") - - var scan = func(path string, info os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("devmapper: Can't walk the file %s", path) - return nil - } - - // Skip any directories - if info.IsDir() { - return nil - } - - return devices.deviceFileWalkFunction(path, info) - } - - return filepath.Walk(devices.metadataDir(), scan) -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(id int, hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - } - - delete(devices.Devices, hash) - - if err := devices.removeMetadata(info); err != nil { - logrus.Debugf("devmapper: Error removing metadata: %s", err) - return err - } - - return nil -} - -// Should be called with devices.Lock() held. 
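// The deviceIDMap helpers above pack one in-use bit per thin-device ID into
// a byte slice. A standalone sketch of the same bookkeeping (hypothetical
// type, illustration only; the real code also wraps IDs at 24 bits):
type idBitmap []byte

func (b idBitmap) set(id int)         { b[id/8] |= 1 << uint(id%8) }
func (b idBitmap) clear(id int)       { b[id/8] &^= 1 << uint(id%8) }
func (b idBitmap) isFree(id int) bool { return b[id/8]&(1<<uint(id%8)) == 0 }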
-func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemoval(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// Return true only if kernel supports xfs and mkfs.xfs is available -func xfsSupported() bool { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return false - } - - // Check if kernel supports xfs filesystem or not. - exec.Command("modprobe", "xfs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - return false - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return true - } - } - - if err := s.Err(); err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - } - return false -} - -func determineDefaultFS() string { - if xfsSupported() { - return "xfs" - } - - logrus.Warn("devmapper: XFS is not supported on your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. 
Defaulting to ext4 filesystem") - return "ext4" -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - args := []string{} - for _, arg := range devices.mkfsArgs { - args = append(args, arg) - } - - args = append(args, devname) - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) - defer func() { - if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case "xfs": - err = exec.Command("mkfs.xfs", args...).Run() - case "ext4": - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. - if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. 
- if !devices.deferredDelete { - return - } - - logrus.Debugf("devmapper: Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo) error { - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, baseInfo.Size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) - if err != nil { - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - return nil - } - - if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) - return uuid, nil 
-} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return devices.BaseDeviceFilesystem -} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match the stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If the user specified a filesystem via the dm.fs option and the - // current filesystem of the base image is not the same, warn the user - // that dm.fs will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) - devices.filesystem = devices.BaseDeviceFilesystem - } - return nil -} - -func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { - devices.BaseDeviceFilesystem = fs - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - devices.BaseDeviceUUID = uuid - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) createBaseImage() error { - logrus.Debugf("devmapper: Initializing base device-mapper thin volume") - - // Create initial device - info, err := devices.createRegisterDevice("") - if err != nil { - return err - } - - logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume") - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return err - } - - if err := devices.createFilesystem(info); err != nil { - return err - } - - info.Initialized = true - if err := devices.saveMetadata(info); err != nil { - info.Initialized = false - return err - } - - if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - - return nil -} - -// Returns whether the thin pool device exists. If the device exists, it also -// makes sure it is a thin pool device and not some other type of device. -func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) - - info, err := devicemapper.GetInfo(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) - } - - // Device does not exist. 
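	// (libdm reports existence via the Exists field of the device info
	// struct; zero means no such device-mapper device is loaded.)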
- if info.Exists == 0 { - return false, nil - } - - _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) - } - - if deviceType != "thin-pool" { - return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) - } - - return true, nil -} - -func (devices *DeviceSet) checkThinPool() error { - _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - if dataUsed != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } - if transactionID != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", - devices.thinPoolDevice) - } - return nil -} - -// Base image is initialized properly. Either save the UUID for the first time -// (the upgrade case) or verify the UUID. -func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { - // If BaseDeviceUUID is empty (the upgrade case), save it and return success. - if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/docker/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs UUID - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) - } - - defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) - - switch devices.BaseDeviceFilesystem { - case "ext4": - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - case "xfs": - if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow 
rootfs:%v:%s", err, string(out)) - } - default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil - } - - logrus.Debugf("devmapper: Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. - if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil -} - -func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - syscall.CloseOnExec(fd) - } - } - } - } -} - -// DMLog implements logging using DevMapperLogger interface. -func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { - // By default libdm sends us all the messages including debug ones. - // We need to filter out messages here and figure out which one - // should be printed. - if level > logLevel { - return - } - - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - if level <= devicemapper.LogLevelErr { - logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else if level <= devicemapper.LogLevelInfo { - logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else { - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. 
-func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: %s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. - if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. 
- if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was an open transaction but the pool transaction ID is the - // same as the open transaction ID, there is nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If the open transaction ID is less than the pool transaction ID, - // something is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // The pool transaction ID is not the same as the open transaction ID. - // There is a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. - if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debugf("devmapper: Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - /* - * Driver version 4.27.0 and greater support the deferred removal - * feature. 
- */ - - logrus.Debugf("devicemapper: driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return graphdriver.ErrNotSupported - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return graphdriver.ErrNotSupported - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it cannot be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of a loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - stat, err := file.Stat() - if err != nil { - return 0, 0, err - } - - dev := stat.Sys().(*syscall.Stat_t).Rdev - majorNum := major(dev) - minorNum := minor(dev) - - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is the backing file of a loopback device, find the -// loopback device name and its major/minor number. -func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of the thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // The data device has not been passed in, so there should be a data - // file which is being mounted as a loop device. 
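	// (The pool's data and metadata major:minor pairs were just read from
	// its dm table above; each is now compared against the loop device
	// attached to the default backing file so the file can be recorded as
	// the loopback backing store.)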
- if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // The metadata device has not been passed in, so there should be a - // metadata file which is being mounted as a loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) error { - // give ourselves to libdm as a log handler - devicemapper.LogInit(devices) - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return graphdriver.ErrNotSupported - } - - // If the user asked for deferred removal then check that both the libdm - // library and the kernel driver support deferred removal; otherwise - // error out. - if enableDeferredRemoval { - if !driverDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") - } - if !devicemapper.LibraryDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") - } - logrus.Debugf("devmapper: Deferred removal support enabled.") - devices.deferredRemove = true - } - - if enableDeferredDeletion { - if !devices.deferredRemove { - return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") - } - logrus.Debugf("devmapper: Deferred deletion support enabled.") - devices.deferredDelete = true - } - - // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Warn("devmapper: Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/daemon/#daemon-storage-driver-option") - } - - // Create the root dir of the devmapper driver with ownership matching - // this daemon's remapped root uid/gid so containers can start properly - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { - return err - } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { - return err - } - - // Set the device prefix from the device id and inode of the docker root dir - - st, err := os.Stat(devices.root) - if err != nil { - return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) - } - sysSt := st.Sys().(*syscall.Stat_t) - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. 
- // docker-maj,min[-inode] stands for: - // - Managed by docker - // - The target of this device is at major <maj> and minor <min> - // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this bad hack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in <root>/devicemapper/data and - // <root>/devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logrus.Debugf("devmapper: Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - if devices.dataDevice == "" { - // Make sure the sparse images exist in <root>/devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("Loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in <root>/devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created the pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of the - // pool, like whether it is using loop devices. 
- if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warnf("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers it in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string) error { - logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case the hash of the new device is - // the same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil { - return err - } - - return nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. - if info.Deleted { - return nil - } - - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. - if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID) - return err - } - - defer devices.closeTransaction() - - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return the error. If deferred - // deletion is not enabled, we return the error. If the error is - // something other than EBUSY, return the error. 
- if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { - logrus.Debugf("devmapper: Error deleting device: %s", err) - return err - } - } - - if err == nil { - if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { - return err - } - // If device was already in deferred delete state that means - // deletion was being tried again later. Reduce the deleted - // device count. - if info.Deleted { - devices.nrDeletedDevices-- - } - devices.markDeviceIDFree(info.DeviceID) - } else { - if err := devices.markForDeferredDeletion(info); err != nil { - return err - } - } - - return nil -} - -// Issue discard only if device open count is zero. -func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash) - // This is a workaround for the kernel not discarding blocks on - // the thin pool when we remove a thinp device, so we do it - // manually. - // Even if device is deferred deleted, activate it and issue - // discards. - if err := devices.activateDeviceIfNeeded(info, true); err != nil { - return err - } - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.OpenCount != 0 { - logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) - return nil - } - - if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) - } - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { - if devices.doBlkDiscard { - devices.issueDiscard(info) - } - - // Try to deactivate device in case it is active. - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Error deactivating device: %s", err) - return err - } - - if err := devices.deleteTransaction(info, syncDelete); err != nil { - return err - } - - return nil -} - -// DeleteDevice will return success if device has been marked for deferred -// removal. If one wants to override that and wants DeleteDevice() to fail if -// the device was busy and could not be deleted, set syncDelete=true. -func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // If mountCount is not zero, that means the device is still in use - // or has not been Put() properly. Fail device deletion. - - if info.mountCount != 0 { - return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. 
mntCount=%v", info.Hash, info.mountCount) - } - - return devices.deleteDevice(info, syncDelete) -} - -func (devices *DeviceSet) deactivatePool() error { - logrus.Debugf("devmapper: deactivatePool()") - defer logrus.Debugf("devmapper: deactivatePool END") - devname := devices.getPoolDevName() - - devinfo, err := devicemapper.GetInfo(devname) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - if err := devicemapper.RemoveDevice(devname); err != nil { - return err - } - - if d, err := devicemapper.GetDeps(devname); err == nil { - logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) - } - - return nil -} - -func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) - defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - - if devices.deferredRemove { - if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { - return err - } - } else { - if err := devices.removeDevice(info.Name()); err != nil { - return err - } - } - return nil -} - -// Issues the underlying dm remove operation. -func (devices *DeviceSet) removeDevice(devname string) error { - var err error - - logrus.Debugf("devmapper: removeDevice START(%s)", devname) - defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) - - for i := 0; i < 200; i++ { - err = devicemapper.RemoveDevice(devname) - if err == nil { - break - } - if err != devicemapper.ErrBusy { - return err - } - - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - } - - return err -} - -func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { - if !devices.deferredRemove { - return nil - } - - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) - - devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) - - if devinfo != nil && devinfo.DeferredRemove == 0 { - return nil - } - - // Cancel deferred remove - for i := 0; i < 100; i++ { - err = devicemapper.CancelDeferredRemove(info.Name()) - if err == nil { - break - } - - if err == devicemapper.ErrEnxio { - // Device is probably already gone. Return success. - return nil - } - - if err != devicemapper.ErrBusy { - return err - } - - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - } - return err -} - -// Shutdown shuts down the device by unmounting the root. -func (devices *DeviceSet) Shutdown() error { - logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) - logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) - defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) - - var devs []*devInfo - - // Stop deletion worker. This should start delivering new events to - // ticker channel. That means no new instance of cleanupDeletedDevice() - // will run after this call. If one instance is already running at - // the time of the call, it must be holding devices.Lock() and - // we will block on this lock till cleanup function exits. - devices.deletionWorkerTicker.Stop() - - devices.Lock() - // Save DeviceSet Metadata first. 
Docker kills all threads if they - // don't finish within a certain time. It is possible that the Shutdown() - // routine does not finish in time as we loop trying to deactivate - // some devices while these are busy. In that case the Shutdown() routine - // will be killed and we will not get a chance to save the deviceset - // metadata. Hence save this early before trying to deactivate devices. - devices.saveDeviceSetMetaData() - - for _, info := range devices.Devices { - devs = append(devs, info) - } - devices.Unlock() - - for _, info := range devs { - info.lock.Lock() - if info.mountCount > 0 { - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. - if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { - logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err) - } - - devices.Lock() - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", info.Hash, err) - } - devices.Unlock() - } - info.lock.Unlock() - } - - info, _ := devices.lookupDeviceWithLock("") - if info != nil { - info.lock.Lock() - devices.Lock() - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err) - } - devices.Unlock() - info.lock.Unlock() - } - - devices.Lock() - if devices.thinPoolDevice == "" { - if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err) - } - } - devices.Unlock() - - return nil -} - -// MountDevice mounts the device if not already mounted. -func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if info.mountCount > 0 { - if path != info.mountPath { - return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) - } - - info.mountCount++ - return nil - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs UUID - options = joinMountOptions(options, "nouuid") - } - - options = joinMountOptions(options, devices.mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) - } - - info.mountCount = 1 - info.mountPath = path - - return nil -} - -// UnmountDevice unmounts the device and removes it from hash. 
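// A standalone sketch of the refcounting MountDevice above and UnmountDevice
// below implement (hypothetical type, illustration only; the real code also
// holds per-device and per-set locks around these operations):
type refMount struct {
	count int
	path  string
}

// get reports whether a real mount is needed for path.
func (m *refMount) get(path string) (needMount bool, err error) {
	if m.count > 0 {
		if path != m.path {
			return false, fmt.Errorf("already mounted at %s", m.path)
		}
		m.count++ // reuse the existing mount
		return false, nil
	}
	m.count, m.path = 1, path
	return true, nil
}

// put reports whether the last reference is gone and a real unmount is needed.
func (m *refMount) put() (needUnmount bool) {
	m.count--
	if m.count > 0 {
		return false
	}
	if m.count < 0 { // Put() without a matching Get(); clamp, as UnmountDevice does
		m.count = 0
	}
	m.path = ""
	return true
}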
-func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // If there are running containers when daemon crashes, during daemon - // restarting, it will kill running containers and will finally call - // Put() without calling Get(). So info.MountCount may become negative. - // if info.mountCount goes negative, we do the unmount and assign - // it to 0. - - info.mountCount-- - if info.mountCount > 0 { - return nil - } else if info.mountCount < 0 { - logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0") - info.mountCount = 0 - } - - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { - return err - } - logrus.Debugf("devmapper: Unmount done") - - if err := devices.deactivateDevice(info); err != nil { - return err - } - - info.mountPath = "" - - return nil -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. -func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { - return - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice 
-} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(syscall.Statfs_t) - if err := syscall.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - status.DeferredDeleteEnabled = devices.deferredDelete - status.DeferredDeletedDeviceCount = devices.nrDeletedDevices - status.BaseDeviceSize = devices.getBaseDeviceSize() - status.BaseDeviceFS = devices.getBaseDeviceFS() - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - status.Data.Available = status.Data.Total - status.Data.Used - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - status.Metadata.Available = status.Metadata.Total - status.Metadata.Used - - status.SectorSize = blockSizeInSectors * 512 - - if check, _ := devices.isRealFile(devices.dataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) - if err == nil && actualSpace < status.Data.Available { - status.Data.Available = actualSpace - } - } - - if check, _ := devices.isRealFile(devices.metadataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) - if err == nil && actualSpace < status.Metadata.Available { - status.Metadata.Available = actualSpace - } - } - } - - return status -} - -// exportDeviceMetadata returns the stored metadata (device ID, size and name) of the given device -func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} - return metadata, nil -} - -// NewDeviceSet creates the device set based on the options provided.
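
The thin-pool arithmetic in Status() above is easy to get wrong, so here is a standalone sketch of the same conversion (added for illustration, not part of the vendored file; the pool numbers are invented). dmsetup reports the pool size in 512-byte sectors and data usage in pool blocks, so bytes are recovered by first deriving the block size in sectors:

    package main

    import "fmt"

    func main() {
    	// Hypothetical values as parsed by poolStatus() from a thin-pool
    	// "dmsetup status" line: total size in 512-byte sectors, usage in blocks.
    	totalSizeInSectors := uint64(209715200) // a 100 GiB pool
    	dataUsed, dataTotal := uint64(1024), uint64(1638400)
    	metadataUsed, metadataTotal := uint64(280), uint64(524288)

    	// One data block spans totalSizeInSectors/dataTotal sectors of 512 bytes
    	// (here: 128 sectors, i.e. a 64 KiB block).
    	blockSizeInSectors := totalSizeInSectors / dataTotal
    	fmt.Println("data used (bytes):", dataUsed*blockSizeInSectors*512)
    	fmt.Println("data total (bytes):", dataTotal*blockSizeInSectors*512)

    	// Metadata blocks are always 4 KiB, independent of the data block size.
    	fmt.Println("metadata used (bytes):", metadataUsed*4096)
    	fmt.Println("metadata total (bytes):", metadataTotal*4096)
    }
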
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - } - - foundBlkDiscard := false - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != "ext4" && val != "xfs" { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - default: - return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) - } - } - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go deleted file mode 100644 index 9ab3e4f8..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go +++ /dev/null @@ -1,106 +0,0 @@ -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards 
compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. -// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go deleted file mode 100644 index c03a7730..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go +++ /dev/null @@ -1,204 +0,0 @@ -// +build linux - -package devmapper - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/go-units" -) - -func init() { - graphdriver.Register("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// Init creates a driver with the given home and the set of options. 
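
The dm.* options consumed by NewDeviceSet above (and forwarded by the Init below) are plain key=value strings. A minimal stand-in for the split it relies on, added here for illustration; parseKeyValueOpt approximates docker's parsers.ParseKeyValueOpt, and the option values are examples:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseKeyValueOpt is a local stand-in for docker's parsers.ParseKeyValueOpt:
    // it splits a "key=value" storage option into its two halves.
    func parseKeyValueOpt(opt string) (string, string, error) {
    	parts := strings.SplitN(opt, "=", 2)
    	if len(parts) != 2 {
    		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
    	}
    	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
    }

    func main() {
    	for _, opt := range []string{"dm.basesize=20G", "dm.fs=xfs", "dm.blkdiscard=false"} {
    		key, val, err := parseKeyValueOpt(opt)
    		if err != nil {
    			panic(err)
    		}
    		fmt.Printf("key=%q val=%q\n", key, val)
    	}
    }
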
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) - if err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. -func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, - {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, - {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, - {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, - {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, - {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, - {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// GetMetadata returns a map of information about the device. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown() - - if err2 := mount.Unmount(d.home); err == nil { - err = err2 - } - - return err -} - -// Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent, mountLabel string) error { - if err := d.DeviceSet.AddDevice(id, parent); err != nil { - return err - } - - return nil -} - -// Remove removes a device with a given id, unmounts the filesystem. 
-func (d *Driver) Remove(id string) error { - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return err - } - - mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (string, error) { - mp := path.Join(d.home, "mnt", id) - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { - return "", err - } - - rootFs := path.Join(mp, "rootfs") - if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - - idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - } - - return rootFs, nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - mp := path.Join(d.home, "mnt", id) - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) - } - return err -} - -// Exists checks to see if the device exists. -func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go deleted file mode 100644 index cca1fe1b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "syscall" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - mntpoint, err := os.Stat(mountpoint) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - parent, err := os.Stat(filepath.Join(mountpoint, "..")) - if err != nil { - return false, err - } - mntpointSt := mntpoint.Sys().(*syscall.Stat_t) - parentSt := parent.Sys().(*syscall.Stat_t) - return mntpointSt.Dev != parentSt.Dev, nil -} - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. 
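
The Mounted helper above relies on a classic trick: a mount point lives on a different device than its parent directory, so comparing the two st_dev values answers the question without parsing /proc/mounts. A self-contained, Linux-only sketch of the same idea (added for illustration):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"syscall"
    )

    // isMountPoint reports whether path sits on a different device than its
    // parent directory, which is exactly how Mounted above detects mounts.
    func isMountPoint(path string) (bool, error) {
    	fi, err := os.Stat(path)
    	if err != nil {
    		return false, err
    	}
    	parent, err := os.Stat(filepath.Dir(path))
    	if err != nil {
    		return false, err
    	}
    	return fi.Sys().(*syscall.Stat_t).Dev != parent.Sys().(*syscall.Stat_t).Dev, nil
    }

    func main() {
    	ok, err := isMountPoint("/proc")
    	fmt.Println(ok, err) // expected: true <nil> on a typical Linux host
    }
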
-func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go deleted file mode 100644 index 534f2e58..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go +++ /dev/null @@ -1,299 +0,0 @@ -// +build linux freebsd - -package graphtest - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "syscall" - "testing" - - "github.com/docker/docker/daemon/graphdriver" -) - -var ( - drv *Driver -) - -// Driver conforms to graphdriver.Driver interface and -// contains information such as root and reference count of the number of clients using it. -// This helps in testing drivers added into the framework. -type Driver struct { - graphdriver.Driver - root string - refCount int -} - -// InitLoopbacks ensures that the loopback devices are properly created within -// the system running the device mapper tests. -func InitLoopbacks() error { - statT, err := getBaseLoopStats() - if err != nil { - return err - } - // create at least 8 loopback files, ya, that is a good number - for i := 0; i < 8; i++ { - loopPath := fmt.Sprintf("/dev/loop%d", i) - // only create new loopback files if they don't exist - if _, err := os.Stat(loopPath); err != nil { - if mkerr := syscall.Mknod(loopPath, - uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { - return mkerr - } - os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) - } - } - return nil -} - -// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the -// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the -stat data -func getBaseLoopStats() (*syscall.Stat_t, error) { - loop0, err := os.Stat("/dev/loop0") - if err != nil { - if os.IsNotExist(err) { - return &syscall.Stat_t{ - Uid: 0, - Gid: 0, - Mode: 0660, - }, nil - } - return nil, err - } - return loop0.Sys().(*syscall.Stat_t), nil -} - -func newDriver(t *testing.T, name string) *Driver { - root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") - if err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(root, 0755); err != nil { - t.Fatal(err) - } - - d, err := graphdriver.GetDriver(name, root, nil, nil, nil) - if err != nil { - t.Logf("graphdriver: %v\n", err) - if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { - t.Skipf("Driver %s not supported", name) - } - t.Fatal(err) - } - return &Driver{d, root, 1} -} - -func cleanup(t *testing.T, d *Driver) { - if err := drv.Cleanup(); err != nil { - t.Fatal(err) - } - os.RemoveAll(d.root) -} - -// GetDriver creates a new driver with the given name or returns an existing driver with that name, updating the reference count. -func GetDriver(t *testing.T, name string) graphdriver.Driver { - if drv == nil { - drv = newDriver(t, name) - } else { - drv.refCount++ - } - return drv -} - -// PutDriver removes the driver if it is no longer used and updates the reference count. -func PutDriver(t *testing.T) { - if drv == nil { - t.Skip("No driver to put!") - } - drv.refCount-- - if drv.refCount == 0 { - cleanup(t, drv) - drv = nil - } -} - -func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { - fi, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } - - if fi.Mode()&os.ModeType != mode&os.ModeType { - t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) - } - - if fi.Mode()&os.ModePerm != mode&os.ModePerm { - t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) - } - - if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { - t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) - } - - if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { - t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) - } - - if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { - t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) - } - - if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - if stat.Uid != uid { - t.Fatalf("%s not owned by uid %d", path, uid) - } - if stat.Gid != gid { - t.Fatalf("%s not owned by gid %d", path, gid) - } - } - -} - -// readDir reads a directory just like ioutil.ReadDir() -// then hides specific files (currently "lost+found") -// so the tests don't "see" them -func readDir(dir string) ([]os.FileInfo, error) { - a, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - - b := a[:0] - for _, x := range a { - if x.Name() != "lost+found" { // ext4 always has this dir - b = append(b, x) - } - } - - return b, nil -} - -// DriverTestCreateEmpty creates a new image and verifies that it is empty and has the right metadata -func DriverTestCreateEmpty(t *testing.T, drivername string) { - driver := GetDriver(t, drivername) - defer PutDriver(t) - - if err := driver.Create("empty", "", ""); err != nil { - t.Fatal(err) - } - - if !driver.Exists("empty") { - t.Fatal("Newly created image doesn't exist") - } - - dir, err := driver.Get("empty", "") - if err != nil { - t.Fatal(err) - } - - verifyFile(t, dir, 0755|os.ModeDir, 0, 0) - - // Verify that the directory is empty - fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 0 { - t.Fatal("New directory not empty") - } - - driver.Put("empty") - - if err := driver.Remove("empty"); err != nil { - t.Fatal(err) - } - -} - -func createBase(t *testing.T, driver graphdriver.Driver, name string) { - // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) - - if err := driver.Create(name, "", ""); err != nil { - t.Fatal(err) - } - - dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } - defer driver.Put(name) - - subdir := path.Join(dir, "a subdir") - if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { - t.Fatal(err) - } - if err := os.Chown(subdir, 1, 2); err != nil { - t.Fatal(err) - } - - file := path.Join(dir, "a file") - if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { - t.Fatal(err) - } -} - -func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { - dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } - defer driver.Put(name) - - subdir := path.Join(dir, "a subdir") - verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) - - file := path.Join(dir, "a file") - verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) - - fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 2 { - t.Fatal("Unexpected files in base image") - } - -} - -// DriverTestCreateBase creates a base driver and verifies it. -func DriverTestCreateBase(t *testing.T, drivername string) { - driver := GetDriver(t, drivername) - defer PutDriver(t) - - createBase(t, driver, "Base") - verifyBase(t, driver, "Base") - - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } -} - -// DriverTestCreateSnap creates a driver and a snap and verifies them.
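
These DriverTest* helpers (including DriverTestCreateSnap just below) are meant to be called from a driver's own test file. A hypothetical consumer, added for illustration: "mydriver" is a placeholder name, and the driver is assumed to register itself via an init() in its package, as the in-tree drivers do:

    package mydriver

    import (
    	"testing"

    	"github.com/docker/docker/daemon/graphdriver/graphtest"
    )

    // Each test exercises one scenario against the driver registered as
    // "mydriver"; GetDriver/PutDriver inside the helpers share one instance.
    func TestMyDriverCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "mydriver") }
    func TestMyDriverCreateBase(t *testing.T)  { graphtest.DriverTestCreateBase(t, "mydriver") }
    func TestMyDriverCreateSnap(t *testing.T)  { graphtest.DriverTestCreateSnap(t, "mydriver") }
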
-func DriverTestCreateSnap(t *testing.T, drivername string) { - driver := GetDriver(t, drivername) - defer PutDriver(t) - - createBase(t, driver, "Base") - - if err := driver.Create("Snap", "Base", ""); err != nil { - t.Fatal(err) - } - - verifyBase(t, driver, "Snap") - - if err := driver.Remove("Snap"); err != nil { - t.Fatal(err) - } - - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go deleted file mode 100644 index a50c5211..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go +++ /dev/null @@ -1 +0,0 @@ -package graphtest diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go deleted file mode 100644 index 7d81a83a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/copy.go +++ /dev/null @@ -1,169 +0,0 @@ -// +build linux - -package overlay - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -type copyFlags int - -const ( - copyHardlink copyFlags = 1 << iota -) - -func copyRegular(srcPath, dstPath string, mode os.FileMode) error { - srcFile, err := os.Open(srcPath) - if err != nil { - return err - } - defer srcFile.Close() - - dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) - if err != nil { - return err - } - defer dstFile.Close() - - _, err = pools.Copy(dstFile, srcFile) - - return err -} - -func copyXattr(srcPath, dstPath, attr string) error { - data, err := system.Lgetxattr(srcPath, attr) - if err != nil { - return err - } - if data != nil { - if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { - return err - } - } - return nil -} - -func copyDir(srcDir, dstDir string, flags copyFlags) error { - err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(srcDir, srcPath) - if err != nil { - return err - } - - dstPath := filepath.Join(dstDir, relPath) - if err != nil { - return err - } - - stat, ok := f.Sys().(*syscall.Stat_t) - if !ok { - return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) - } - - isHardlink := false - - switch f.Mode() & os.ModeType { - case 0: // Regular file - if flags&copyHardlink != 0 { - isHardlink = true - if err := os.Link(srcPath, dstPath); err != nil { - return err - } - } else { - if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { - return err - } - } - - case os.ModeDir: - if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { - return err - } - - case os.ModeSymlink: - link, err := os.Readlink(srcPath) - if err != nil { - return err - } - - if err := os.Symlink(link, dstPath); err != nil { - return err - } - - case os.ModeNamedPipe: - fallthrough - case os.ModeSocket: - if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { - return err - } - - case os.ModeDevice: - if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { - return err - } - - default: - return fmt.Errorf("Unknown file type for %s\n", srcPath) - } - - // Everything below is copying metadata from src to dst.
All this metadata - // already shares an inode for hardlinks. - if isHardlink { - return nil - } - - if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - - if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { - return err - } - - // We need to copy this attribute if it appears in an overlay upper layer, as - // this function is used to copy those. It is set by overlay if a directory - // is removed and then re-created and should not inherit anything from the - // same dir in the lower dir. - if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { - return err - } - - isSymlink := f.Mode()&os.ModeSymlink != 0 - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if !isSymlink { - if err := os.Chmod(dstPath, f.Mode()); err != nil { - return err - } - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if !isSymlink { - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) - if err := system.Chtimes(dstPath, aTime, mTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{stat.Atim, stat.Mtim} - if err := system.LUtimesNano(dstPath, ts); err != nil { - return err - } - } - return nil - }) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go deleted file mode 100644 index 59131bba..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go +++ /dev/null @@ -1,476 +0,0 @@ -// +build linux - -package overlay - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "sync" - "syscall" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - - "github.com/opencontainers/runc/libcontainer/label" -) - -// This is a small wrapper over the NaiveDiffWriter that lets us have a custom -// implementation of ApplyDiff() - -var ( - // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. - ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") -) - -// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. -type ApplyDiffProtoDriver interface { - graphdriver.ProtoDriver - // ApplyDiff writes the diff to the archive for the given id and parent id. - // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. - ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) -} - -type naiveDiffDriverWithApply struct { - graphdriver.Driver - applyDiff ApplyDiffProtoDriver -} - -// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. -func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { - return &naiveDiffDriverWithApply{ - Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), - applyDiff: driver, - } -} - -// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. 
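
Before moving on to overlay.go: the chown-before-chmod ordering that copyDir above insists on can be demonstrated in isolation. Even a no-op chown by an unprivileged user clears the setuid/setgid bits of an executable file, so any mode copied earlier would be silently lost. A small demo, added for illustration (ioutil is used to match the era of this code):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    )

    func main() {
    	// Create a throwaway file and give it an executable, setuid mode.
    	f, err := ioutil.TempFile("", "mode-demo")
    	if err != nil {
    		panic(err)
    	}
    	f.Close()
    	defer os.Remove(f.Name())

    	if err := os.Chmod(f.Name(), 0755|os.ModeSetuid); err != nil {
    		panic(err)
    	}

    	// A chown to our own uid/gid changes nothing about ownership, but the
    	// kernel still clears the setuid/setgid bits of the executable file.
    	if err := os.Chown(f.Name(), os.Getuid(), os.Getgid()); err != nil {
    		panic(err)
    	}

    	fi, err := os.Stat(f.Name())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("mode after chown: %v (setuid still set: %v)\n",
    		fi.Mode(), fi.Mode()&os.ModeSetuid != 0)
    }
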
-func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { - b, err := d.applyDiff.ApplyDiff(id, parent, diff) - if err == ErrApplyDiffFallback { - return d.Driver.ApplyDiff(id, parent, diff) - } - return b, err -} - -// This backend uses the overlay union filesystem for containers -// plus hard link file sharing for images. - -// Each container/image can have a "root" subdirectory which is a plain -// filesystem hierarchy, or they can use overlay. - -// If they use overlay there is an "upper" directory and a "lower-id" -// file, as well as "merged" and "work" directories. The "upper" -// directory has the upper layer of the overlay, and "lower-id" contains -// the id of the parent whose "root" directory shall be used as the lower -// layer in the overlay. The overlay itself is mounted in the "merged" -// directory, and the "work" dir is needed for overlay to work. - -// When an overlay layer is created there are two cases, either the -// parent has a "root" dir, then we start out with an empty "upper" -// directory overlaid on the parent's root. This is typically the -// case with the init layer of a container which is based on an image. -// If there is no "root" in the parent, we inherit the lower-id from -// the parent and start by making a copy in the parent's "upper" dir. -// This is typically the case for a container layer which copies -// its parent -init upper layer. - -// Additionally we also have a custom implementation of ApplyLayer -// which makes a recursive copy of the parent "root" layer using -// hardlinks to share file data, and then applies the layer on top -// of that. This means all child images share file (but not directory) -// data with the parent. - -// ActiveMount contains information about the count, path and whether it is mounted or not. -// This information is part of the Driver, that contains list of active mounts that are part of this overlay. -type ActiveMount struct { - count int - path string - mounted bool -} - -// Driver contains information about the home directory and the list of active mounts that are created using this driver. -type Driver struct { - home string - sync.Mutex // Protects concurrent modification to active - active map[string]*ActiveMount - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -var backingFs = "" - -func init() { - graphdriver.Register("overlay", Init) -} - -// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem. -// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
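
To make the layout described in the comment block above concrete, the per-layer directory tree looks roughly like this (the path is an example; which entries exist depends on whether the layer is an image layer or a container layer):

    /var/lib/docker/overlay/<id>/
        root/       image layer: complete tree, file data shared via hardlinks
        lower-id    file holding the id whose root/ serves as lowerdir
        upper/      this layer's writable overlay directory
        work/       scratch space required by overlayfs
        merged/     mount point where the assembled overlay appears
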
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - if err := supportsOverlay(); err != nil { - return nil, graphdriver.ErrNotSupported - } - - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - // check if they are running over btrfs or aufs - switch fsMagic { - case graphdriver.FsMagicBtrfs: - logrus.Error("'overlay' is not supported over btrfs.") - return nil, graphdriver.ErrIncompatibleFS - case graphdriver.FsMagicAufs: - logrus.Error("'overlay' is not supported over aufs.") - return nil, graphdriver.ErrIncompatibleFS - case graphdriver.FsMagicZfs: - logrus.Error("'overlay' is not supported over zfs.") - return nil, graphdriver.ErrIncompatibleFS - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the driver home dir - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return nil, err - } - - d := &Driver{ - home: home, - active: make(map[string]*ActiveMount), - uidMaps: uidMaps, - gidMaps: gidMaps, - } - - return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil -} - -func supportsOverlay() error { - // We can try to modprobe overlay first before looking at - // proc/filesystems for when overlay is supported - exec.Command("modprobe", "overlay").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if s.Text() == "nodev\toverlay" { - return nil - } - } - logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return graphdriver.ErrNotSupported -} - -func (d *Driver) String() string { - return "overlay" -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Backing Filesystem" used in this implementation. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Backing Filesystem", backingFs}, - } -} - -// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return nil, err - } - - metadata := make(map[string]string) - - // If id has a root, it is an image - rootDir := path.Join(dir, "root") - if _, err := os.Stat(rootDir); err == nil { - metadata["RootDir"] = rootDir - return metadata, nil - } - - lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) - if err != nil { - return nil, err - } - - metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") - metadata["UpperDir"] = path.Join(dir, "upper") - metadata["WorkDir"] = path.Join(dir, "work") - metadata["MergedDir"] = path.Join(dir, "merged") - - return metadata, nil -} - -// Cleanup simply returns nil and do not change the existing filesystem. -// This is required to satisfy the graphdriver.Driver interface. -func (d *Driver) Cleanup() error { - return nil -} - -// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. -// The parent filesystem is used to configure these directories for the overlay. 
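
supportsOverlay above keys off /proc/filesystems, where a kernel with overlay support lists the filesystem as "nodev", a tab, then "overlay". The same check in standalone form (added for illustration):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    )

    // kernelSupportsOverlay scans /proc/filesystems for the overlay entry,
    // mirroring the loop in supportsOverlay above.
    func kernelSupportsOverlay() (bool, error) {
    	f, err := os.Open("/proc/filesystems")
    	if err != nil {
    		return false, err
    	}
    	defer f.Close()

    	s := bufio.NewScanner(f)
    	for s.Scan() {
    		if s.Text() == "nodev\toverlay" {
    			return true, nil
    		}
    	}
    	return false, s.Err()
    }

    func main() {
    	ok, err := kernelSupportsOverlay()
    	fmt.Println(ok, err)
    }
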
-func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { - dir := d.dir(id) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { - return err - } - - defer func() { - // Clean up on failure - if retErr != nil { - os.RemoveAll(dir) - } - }() - - // Toplevel images are just a "root" dir - if parent == "" { - if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { - return err - } - return nil - } - - parentDir := d.dir(parent) - - // Ensure parent exists - if _, err := os.Lstat(parentDir); err != nil { - return err - } - - // If parent has a root, just do a overlay to it - parentRoot := path.Join(parentDir, "root") - - if s, err := os.Lstat(parentRoot); err == nil { - if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { - return err - } - return nil - } - - // Otherwise, copy the upper and the lower-id from the parent - - lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) - if err != nil { - return err - } - - if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { - return err - } - - parentUpperDir := path.Join(parentDir, "upper") - s, err := os.Lstat(parentUpperDir) - if err != nil { - return err - } - - upperDir := path.Join(dir, "upper") - if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { - return err - } - - return copyDir(parentUpperDir, upperDir, 0) -} - -func (d *Driver) dir(id string) string { - return path.Join(d.home, id) -} - -// Remove cleans the directories that are created for this id. -func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get creates and mounts the required file system for the given id and returns the mount path. 
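
Get below assembles the overlayfs mount from the directories Create laid out. For reference, this sketch (added for illustration, with example paths) shows the option string being built and its shell equivalent:

    package main

    import "fmt"

    func main() {
    	// Illustrative layer paths following the layout described earlier.
    	lower := "/var/lib/docker/overlay/IMAGE/root"
    	upper := "/var/lib/docker/overlay/CONTAINER/upper"
    	work := "/var/lib/docker/overlay/CONTAINER/work"

    	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
    	fmt.Println(opts)
    	// Shell equivalent of the syscall.Mount call in Get:
    	//   mount -t overlay overlay -o "$opts" /var/lib/docker/overlay/CONTAINER/merged
    }
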
-func (d *Driver) Get(id string, mountLabel string) (string, error) { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - mount := d.active[id] - if mount != nil { - mount.count++ - return mount.path, nil - } - - mount = &ActiveMount{count: 1} - - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return "", err - } - - // If id has a root, just return it - rootDir := path.Join(dir, "root") - if _, err := os.Stat(rootDir); err == nil { - mount.path = rootDir - d.active[id] = mount - return mount.path, nil - } - - lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) - if err != nil { - return "", err - } - lowerDir := path.Join(d.dir(string(lowerID)), "root") - upperDir := path.Join(dir, "upper") - workDir := path.Join(dir, "work") - mergedDir := path.Join(dir, "merged") - - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) - if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) - } - // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a - // user namespace requires this to move a directory from lower to upper. - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } - if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err - } - mount.path = mergedDir - mount.mounted = true - d.active[id] = mount - - return mount.path, nil -} - -// Put unmounts the mount path created for the given id. -func (d *Driver) Put(id string) error { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - mount := d.active[id] - if mount == nil { - logrus.Debugf("Put on a non-mounted device %s", id) - // but it might be still here - if d.Exists(id) { - mergedDir := path.Join(d.dir(id), "merged") - err := syscall.Unmount(mergedDir, 0) - if err != nil { - logrus.Debugf("Failed to unmount %s overlay: %v", id, err) - } - } - return nil - } - - mount.count-- - if mount.count > 0 { - return nil - } - - defer delete(d.active, id) - if mount.mounted { - err := syscall.Unmount(mount.path, 0) - if err != nil { - logrus.Debugf("Failed to unmount %s overlay: %v", id, err) - } - return err - } - return nil -} - -// ApplyDiff applies the new layer on top of the root, if parent does not exist it will return an ErrApplyDiffFallback error. -func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { - dir := d.dir(id) - - if parent == "" { - return 0, ErrApplyDiffFallback - } - - parentRootDir := path.Join(d.dir(parent), "root") - if _, err := os.Stat(parentRootDir); err != nil { - return 0, ErrApplyDiffFallback - } - - // We now know there is a parent, and it has a "root" directory containing - // the full root filesystem. We can just hardlink it and apply the - // layer.
This relies on two things: - // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container - // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) - // These are all currently true and are not expected to break - - tmpRootDir, err := ioutil.TempDir(dir, "tmproot") - if err != nil { - return 0, err - } - defer func() { - if err != nil { - os.RemoveAll(tmpRootDir) - } else { - os.RemoveAll(path.Join(dir, "upper")) - os.RemoveAll(path.Join(dir, "work")) - os.RemoveAll(path.Join(dir, "merged")) - os.RemoveAll(path.Join(dir, "lower-id")) - } - }() - - if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { - return 0, err - } - - options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} - if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { - return 0, err - } - - rootDir := path.Join(dir, "root") - if err := os.Rename(tmpRootDir, rootDir); err != nil { - return 0, err - } - - return -} - -// Exists checks to see if the id is already mounted. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go deleted file mode 100644 index 3dbb4de4..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package overlay diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go deleted file mode 100644 index 262954d6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_aufs,linux - -package register - -import ( - // register the aufs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/aufs" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go deleted file mode 100644 index f456cc5c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_btrfs,linux - -package register - -import ( - // register the btrfs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/btrfs" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go deleted file mode 100644 index bb2e9ef5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_devicemapper,linux - -package register - -import ( - // register the devmapper graphdriver - _ "github.com/docker/docker/daemon/graphdriver/devmapper" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go deleted file mode 100644 index 3a952642..00000000 --- 
a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_overlay,linux - -package register - -import ( - // register the overlay graphdriver - _ "github.com/docker/docker/daemon/graphdriver/overlay" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go deleted file mode 100644 index 98fad23b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register vfs - _ "github.com/docker/docker/daemon/graphdriver/vfs" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go deleted file mode 100644 index efaa5005..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register the windows graph driver - _ "github.com/docker/docker/daemon/graphdriver/windows" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go deleted file mode 100644 index 8c31c415..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd - -package register - -import ( - // register the zfs driver - _ "github.com/docker/docker/daemon/graphdriver/zfs" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go deleted file mode 100644 index 00d9f8ec..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go +++ /dev/null @@ -1,135 +0,0 @@ -package vfs - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - - "github.com/opencontainers/runc/libcontainer/label" -) - -var ( - // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.CopyWithTar -) - -func init() { - graphdriver.Register("vfs", Init) -} - -// Init returns a new VFS driver. -// This sets the home directory for the driver and returns NaiveDiffDriver. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -// Driver holds information about the driver, home directory of the driver. -// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. -// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. 
-// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver -type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -func (d *Driver) String() string { - return "vfs" -} - -// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. -func (d *Driver) Status() [][2]string { - return nil -} - -// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. -func (d *Driver) Create(id, parent, mountLabel string) error { - dir := d.dir(id) - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { - return err - } - opts := []string{"level:s0"} - if _, mountLabel, err := label.InitLabels(opts); err == nil { - label.SetFileLabel(dir, mountLabel) - } - if parent == "" { - return nil - } - parentDir, err := d.Get(parent, "") - if err != nil { - return fmt.Errorf("%s: %s", parent, err) - } - if err := CopyWithTar(parentDir, dir); err != nil { - return err - } - return nil -} - -func (d *Driver) dir(id string) string { - return filepath.Join(d.home, "dir", filepath.Base(id)) -} - -// Remove deletes the content from the directory for a given id. -func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get returns the directory for the given id. -func (d *Driver) Get(id, mountLabel string) (string, error) { - dir := d.dir(id) - if st, err := os.Stat(dir); err != nil { - return "", err - } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - return dir, nil -} - -// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. -func (d *Driver) Put(id string) error { - // The vfs driver has no runtime resources (e.g. mounts) - // to clean up, so we don't need anything here - return nil -} - -// Exists checks to see if the directory exists for the given id. 
-func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go deleted file mode 100644 index 05942252..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/windows/windows.go +++ /dev/null @@ -1,670 +0,0 @@ -//+build windows - -package windows - -import ( - "crypto/sha512" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/random" - "github.com/microsoft/hcsshim" -) - -// init registers the windows graph drivers to the register. -func init() { - graphdriver.Register("windowsfilter", InitFilter) - graphdriver.Register("windowsdiff", InitDiff) -} - -const ( - // diffDriver is an hcsshim driver type - diffDriver = iota - // filterDriver is an hcsshim driver type - filterDriver -) - -// Driver represents a windows graph driver. -type Driver struct { - // info stores the shim driver information - info hcsshim.DriverInfo - // Mutex protects concurrent modification to active - sync.Mutex - // active stores references to the activated layers - active map[string]int -} - -// InitFilter returns a new Windows storage filter driver. -func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: filterDriver, - }, - active: make(map[string]int), - } - return d, nil -} - -// InitDiff returns a new Windows differencing disk driver. -func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitDiff at %s", home) - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: diffDriver, - }, - active: make(map[string]int), - } - return d, nil -} - -// String returns the string representation of a driver. -func (d *Driver) String() string { - switch d.info.Flavour { - case diffDriver: - return "windowsdiff" - case filterDriver: - return "windowsfilter" - default: - return "Unknown driver flavour" - } -} - -// Status returns the status of the driver. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Windows", ""}, - } -} - -// Exists returns true if the given id is registered with this driver. -func (d *Driver) Exists(id string) bool { - rID, err := d.resolveID(id) - if err != nil { - return false - } - result, err := hcsshim.LayerExists(d.info, rID) - if err != nil { - return false - } - return result -} - -// Create creates a new layer with the given id. 
-func (d *Driver) Create(id, parent, mountLabel string) error { - rPId, err := d.resolveID(parent) - if err != nil { - return err - } - - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return err - } - - var layerChain []string - - parentIsInit := strings.HasSuffix(rPId, "-init") - - if !parentIsInit && rPId != "" { - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return err - } - layerChain = []string{parentPath} - } - - layerChain = append(layerChain, parentChain...) - - if parentIsInit { - if len(layerChain) == 0 { - return fmt.Errorf("Cannot create a read/write layer without a parent layer.") - } - if err := hcsshim.CreateSandboxLayer(d.info, id, layerChain[0], layerChain); err != nil { - return err - } - } else { - if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { - return err - } - } - - if _, err := os.Lstat(d.dir(parent)); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) - } - - if err := d.setLayerChain(id, layerChain); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return err - } - - return nil -} - -// dir returns the absolute path to the layer. -func (d *Driver) dir(id string) string { - return filepath.Join(d.info.HomeDir, filepath.Base(id)) -} - -// Remove unmounts and removes the dir information. -func (d *Driver) Remove(id string) error { - rID, err := d.resolveID(id) - if err != nil { - return err - } - os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail - return hcsshim.DestroyLayer(d.info, rID) -} - -// Get returns the rootfs path for the id. This will mount the dir at its given path. -func (d *Driver) Get(id, mountLabel string) (string, error) { - logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) - var dir string - - d.Lock() - defer d.Unlock() - - rID, err := d.resolveID(id) - if err != nil { - return "", err - } - - // Getting the layer paths must be done outside of the lock. - layerChain, err := d.getLayerChain(rID) - if err != nil { - return "", err - } - - if d.active[rID] == 0 { - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - return "", err - } - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err2) - } - return "", err - } - } - - mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) - if err != nil { - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err2) - } - return "", err - } - - d.active[rID]++ - - // If the layer has a mount path, use that. Otherwise, use the - // folder path. - if mountPath != "" { - dir = mountPath - } else { - dir = d.dir(id) - } - - return dir, nil -} - -// Put releases a reference to the layer, unpreparing and deactivating it once the last reference is gone.
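
Both this windows driver (via d.active) and the overlay driver's ActiveMount use the same reference-counting shape around Get/Put, which the Put below implements. A stripped-down illustration of the pattern, added for reference (the hcsshim activate/deactivate calls are stood in by prints):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // refCounter activates a layer on the first Get and tears it down when
    // the last matching Put releases it, exactly the shape used above.
    type refCounter struct {
    	mu     sync.Mutex
    	active map[string]int
    }

    func (r *refCounter) Get(id string) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	if r.active[id] == 0 {
    		fmt.Println("activate", id) // e.g. ActivateLayer + PrepareLayer
    	}
    	r.active[id]++
    }

    func (r *refCounter) Put(id string) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	switch {
    	case r.active[id] == 1:
    		fmt.Println("deactivate", id) // e.g. UnprepareLayer + DeactivateLayer
    		delete(r.active, id)
    	case r.active[id] > 1:
    		r.active[id]--
    	}
    }

    func main() {
    	r := &refCounter{active: map[string]int{}}
    	r.Get("layer1")
    	r.Get("layer1")
    	r.Put("layer1")
    	r.Put("layer1") // the last Put triggers deactivation
    }
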
-func (d *Driver) Put(id string) error { - logrus.Debugf("WindowsGraphDriver Put() id %s", id) - - rID, err := d.resolveID(id) - if err != nil { - return err - } - - d.Lock() - defer d.Unlock() - - if d.active[rID] > 1 { - d.active[rID]-- - } else if d.active[rID] == 1 { - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return err - } - if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { - return err - } - delete(d.active, rID) - } - - return nil -} - -// Cleanup ensures the information the driver stores is properly removed. -func (d *Driver) Cleanup() error { - return nil -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (d *Driver) Diff(id, parent string) (arch archive.Archive, err error) { - rID, err := d.resolveID(id) - if err != nil { - return - } - - // Getting the layer paths must be done outside of the lock. - layerChain, err := d.getLayerChain(rID) - if err != nil { - return - } - - d.Lock() - - // To support export, a layer must be activated but not prepared. - if d.info.Flavour == filterDriver { - if d.active[rID] == 0 { - if err = hcsshim.ActivateLayer(d.info, rID); err != nil { - d.Unlock() - return - } - defer func() { - if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) - } - }() - } else { - if err = hcsshim.UnprepareLayer(d.info, rID); err != nil { - d.Unlock() - return - } - defer func() { - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err) - } - }() - } - } - - d.Unlock() - - return d.exportLayer(rID, layerChain) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { - return nil, fmt.Errorf("The Windows graphdriver does not support Changes()") -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - rPId, err := d.resolveID(parent) - if err != nil { - return - } - - if d.info.Flavour == diffDriver { - start := time.Now().UTC() - logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer") - destination := d.dir(id) - destination = filepath.Dir(destination) - if size, err = chrootarchive.ApplyUncompressedLayer(destination, diff, nil); err != nil { - return - } - logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return - } - - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return - } - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return - } - layerChain := []string{parentPath} - layerChain = append(layerChain, parentChain...) - - if size, err = d.importLayer(id, diff, layerChain); err != nil { - return - } - - if err = d.setLayerChain(id, layerChain); err != nil { - return - } - - return -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) { - rPId, err := d.resolveID(parent) - if err != nil { - return - } - - changes, err := d.Changes(id, rPId) - if err != nil { - return - } - - layerFs, err := d.Get(id, "") - if err != nil { - return - } - defer d.Put(id) - - return archive.ChangesSize(layerFs, changes), nil -} - -// CustomImageInfo is the object returned by the driver describing the base -// image. -type CustomImageInfo struct { - ID string - Name string - Version string - Path string - Size int64 - CreatedTime time.Time -} - -// GetCustomImageInfos returns the image infos for window specific -// base images which should always be present. -func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) { - strData, err := hcsshim.GetSharedBaseImages() - if err != nil { - return nil, fmt.Errorf("Failed to restore base images: %s", err) - } - - type customImageInfoList struct { - Images []CustomImageInfo - } - - var infoData customImageInfoList - - if err = json.Unmarshal([]byte(strData), &infoData); err != nil { - err = fmt.Errorf("JSON unmarshal returned error=%s", err) - logrus.Error(err) - return nil, err - } - - var images []CustomImageInfo - - for _, imageData := range infoData.Images { - folderName := filepath.Base(imageData.Path) - - // Use crypto hash of the foldername to generate a docker style id. - h := sha512.Sum384([]byte(folderName)) - id := fmt.Sprintf("%x", h[:32]) - - if err := d.Create(id, "", ""); err != nil { - return nil, err - } - // Create the alternate ID file. - if err := d.setID(id, folderName); err != nil { - return nil, err - } - - imageData.ID = id - images = append(images, imageData) - } - - return images, nil -} - -// GetMetadata returns custom driver information. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m := make(map[string]string) - m["dir"] = d.dir(id) - return m, nil -} - -// exportLayer generates an archive from a layer based on the given ID. -func (d *Driver) exportLayer(id string, parentLayerPaths []string) (arch archive.Archive, err error) { - layerFolder := d.dir(id) - - tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) - if err = os.MkdirAll(tempFolder, 0755); err != nil { - logrus.Errorf("Could not create %s %s", tempFolder, err) - return - } - defer func() { - if err != nil { - _, folderName := filepath.Split(tempFolder) - if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { - logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) - } - } - }() - - if err = hcsshim.ExportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { - return - } - - archive, err := archive.Tar(tempFolder, archive.Uncompressed) - if err != nil { - return - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - d.Put(id) - _, folderName := filepath.Split(tempFolder) - if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { - logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) - } - return err - }), nil - -} - -// importLayer adds a new layer to the tag and graph store based on the given data. 
-func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { - layerFolder := d.dir(id) - - tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) - if err = os.MkdirAll(tempFolder, 0755); err != nil { - logrus.Errorf("Could not create %s %s", tempFolder, err) - return - } - defer func() { - _, folderName := filepath.Split(tempFolder) - if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { - logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) - } - }() - - start := time.Now().UTC() - logrus.Debugf("Start untar layer") - if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil { - return - } - err = copySysFiles(tempFolder, filepath.Join(d.info.HomeDir, "sysfile-backups", id)) - if err != nil { - return - } - logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { - return - } - - return -} - -// resolveID computes the layerID information based on the given id. -func (d *Driver) resolveID(id string) (string, error) { - content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) - if os.IsNotExist(err) { - return id, nil - } else if err != nil { - return "", err - } - return string(content), nil -} - -// setID stores the layerId in disk. -func (d *Driver) setID(id, altID string) error { - err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) - if err != nil { - return err - } - return nil -} - -// getLayerChain returns the layer chain information. -func (d *Driver) getLayerChain(id string) ([]string, error) { - jPath := filepath.Join(d.dir(id), "layerchain.json") - content, err := ioutil.ReadFile(jPath) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, fmt.Errorf("Unable to read layerchain file - %s", err) - } - - var layerChain []string - err = json.Unmarshal(content, &layerChain) - if err != nil { - return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) - } - - return layerChain, nil -} - -// setLayerChain stores the layer chain information in disk. -func (d *Driver) setLayerChain(id string, chain []string) error { - content, err := json.Marshal(&chain) - if err != nil { - return fmt.Errorf("Failed to marshall layerchain json - %s", err) - } - - jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) - if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) - } - - return nil -} - -// DiffPath returns a directory that contains files needed to construct layer diff. -func (d *Driver) DiffPath(id string) (path string, release func() error, err error) { - id, err = d.resolveID(id) - if err != nil { - return - } - - // Getting the layer paths must be done outside of the lock. 
- layerChain, err := d.getLayerChain(id) - if err != nil { - return - } - - layerFolder := d.dir(id) - tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) - if err = os.MkdirAll(tempFolder, 0755); err != nil { - logrus.Errorf("Could not create %s %s", tempFolder, err) - return - } - - defer func() { - if err != nil { - _, folderName := filepath.Split(tempFolder) - if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { - logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) - } - } - }() - - if err = hcsshim.ExportLayer(d.info, id, tempFolder, layerChain); err != nil { - return - } - - err = copySysFiles(filepath.Join(d.info.HomeDir, "sysfile-backups", id), tempFolder) - if err != nil { - return - } - - return tempFolder, func() error { - // TODO: activate layers and release here? - _, folderName := filepath.Split(tempFolder) - return hcsshim.DestroyLayer(d.info, folderName) - }, nil -} - -var sysFileWhiteList = []string{ - "Hives\\*", - "Files\\BOOTNXT", - "tombstones.txt", -} - -// note this only handles files -func copySysFiles(src string, dest string) error { - if err := os.MkdirAll(dest, 0700); err != nil { - return err - } - return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { - rel, err := filepath.Rel(src, path) - if err != nil { - return err - } - for _, sysfile := range sysFileWhiteList { - if matches, err := filepath.Match(sysfile, rel); err != nil || !matches { - continue - } - - fi, err := os.Lstat(path) - if err != nil { - return err - } - - if !fi.Mode().IsRegular() { - continue - } - - targetPath := filepath.Join(dest, rel) - if err = os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { - return err - } - - in, err := os.Open(path) - if err != nil { - return err - } - out, err := os.Create(targetPath) - if err != nil { - in.Close() - return err - } - _, err = io.Copy(out, in) - in.Close() - out.Close() - if err != nil { - return err - } - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS deleted file mode 100644 index 9c270c54..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Jörg Thalheim (@Mic92) -Arthur Gautier (@baloose) diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go deleted file mode 100644 index e0967983..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go +++ /dev/null @@ -1,333 +0,0 @@ -// +build linux freebsd - -package zfs - -import ( - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - zfs "github.com/mistifyio/go-zfs" - "github.com/opencontainers/runc/libcontainer/label" -) - -type zfsOptions struct { - fsName string - mountPath string -} - -func init() { - graphdriver.Register("zfs", Init) -} - -// Logger returns a zfs logger implementation. -type Logger struct{} - -// Log wraps log message from ZFS driver with a prefix '[zfs]'. 
-func (*Logger) Log(cmd []string) { - logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) -} - -// Init returns a new ZFS driver. -// It takes base mount path and a array of options which are represented as key value pairs. -// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. -func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - var err error - - if _, err := exec.LookPath("zfs"); err != nil { - logrus.Debugf("[zfs] zfs command is not available: %v", err) - return nil, graphdriver.ErrPrerequisites - } - - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) - if err != nil { - logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) - return nil, graphdriver.ErrPrerequisites - } - defer file.Close() - - options, err := parseOptions(opt) - if err != nil { - return nil, err - } - options.mountPath = base - - rootdir := path.Dir(base) - - if options.fsName == "" { - err = checkRootdirFs(rootdir) - if err != nil { - return nil, err - } - } - - if options.fsName == "" { - options.fsName, err = lookupZfsDataset(rootdir) - if err != nil { - return nil, err - } - } - - zfs.SetLogger(new(Logger)) - - filesystems, err := zfs.Filesystems(options.fsName) - if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) - } - - filesystemsCache := make(map[string]bool, len(filesystems)) - var rootDataset *zfs.Dataset - for _, fs := range filesystems { - if fs.Name == options.fsName { - rootDataset = fs - } - filesystemsCache[fs.Name] = true - } - - if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) - } - - d := &Driver{ - dataset: rootDataset, - options: options, - filesystemsCache: filesystemsCache, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (zfsOptions, error) { - var options zfsOptions - options.fsName = "" - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, err - } - key = strings.ToLower(key) - switch key { - case "zfs.fsname": - options.fsName = val - default: - return options, fmt.Errorf("Unknown option %s", key) - } - } - return options, nil -} - -func lookupZfsDataset(rootdir string) (string, error) { - var stat syscall.Stat_t - if err := syscall.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - wantedDev := stat.Dev - - mounts, err := mount.GetMounts() - if err != nil { - return "", err - } - for _, m := range mounts { - if err := syscall.Stat(m.Mountpoint, &stat); err != nil { - logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) - continue // may fail on fuse file systems - } - - if stat.Dev == wantedDev && m.Fstype == "zfs" { - return m.Source, nil - } - } - - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) -} - -// Driver holds information about the driver, such as zfs dataset, options and cache. -type Driver struct { - dataset *zfs.Dataset - options zfsOptions - sync.Mutex // protects filesystem cache against concurrent access - filesystemsCache map[string]bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -func (d *Driver) String() string { - return "zfs" -} - -// Cleanup is used to implement graphdriver.ProtoDriver. 
There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// Status returns information about the ZFS filesystem. It returns a two dimensional array of information -// such as pool name, dataset name, disk usage, parent quota and compression used. -// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', -// 'Space Available', 'Parent Quota' and 'Compression'. -func (d *Driver) Status() [][2]string { - parts := strings.Split(d.dataset.Name, "/") - pool, err := zfs.GetZpool(parts[0]) - - var poolName, poolHealth string - if err == nil { - poolName = pool.Name - poolHealth = pool.Health - } else { - poolName = fmt.Sprintf("error while getting pool information %v", err) - poolHealth = "not available" - } - - quota := "no" - if d.dataset.Quota != 0 { - quota = strconv.FormatUint(d.dataset.Quota, 10) - } - - return [][2]string{ - {"Zpool", poolName}, - {"Zpool Health", poolHealth}, - {"Parent Dataset", d.dataset.Name}, - {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, - {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, - {"Parent Quota", quota}, - {"Compression", d.dataset.Compression}, - } -} - -// GetMetadata returns image/container metadata related to graph driver -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -func (d *Driver) cloneFilesystem(name, parentName string) error { - snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) - parentDataset := zfs.Dataset{Name: parentName} - snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) - if err != nil { - return err - } - - _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) - if err == nil { - d.Lock() - d.filesystemsCache[name] = true - d.Unlock() - } - - if err != nil { - snapshot.Destroy(zfs.DestroyDeferDeletion) - return err - } - return snapshot.Destroy(zfs.DestroyDeferDeletion) -} - -func (d *Driver) zfsPath(id string) string { - return d.options.fsName + "/" + id -} - -func (d *Driver) mountPath(id string) string { - return path.Join(d.options.mountPath, "graph", getMountpoint(id)) -} - -// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. -func (d *Driver) Create(id string, parent string, mountLabel string) error { - err := d.create(id, parent) - if err == nil { - return nil - } - if zfsError, ok := err.(*zfs.Error); ok { - if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { - return err - } - // aborted build -> cleanup - } else { - return err - } - - dataset := zfs.Dataset{Name: d.zfsPath(id)} - if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { - return err - } - - // retry - return d.create(id, parent) -} - -func (d *Driver) create(id, parent string) error { - name := d.zfsPath(id) - if parent == "" { - mountoptions := map[string]string{"mountpoint": "legacy"} - fs, err := zfs.CreateFilesystem(name, mountoptions) - if err == nil { - d.Lock() - d.filesystemsCache[fs.Name] = true - d.Unlock() - } - return err - } - return d.cloneFilesystem(name, d.zfsPath(parent)) -} - -// Remove deletes the dataset, filesystem and the cache for the given id. 
-func (d *Driver) Remove(id string) error { - name := d.zfsPath(id) - dataset := zfs.Dataset{Name: name} - err := dataset.Destroy(zfs.DestroyRecursive) - if err == nil { - d.Lock() - delete(d.filesystemsCache, name) - d.Unlock() - } - return err -} - -// Get returns the mountpoint for the given id after creating the target directories if necessary. -func (d *Driver) Get(id, mountLabel string) (string, error) { - mountpoint := d.mountPath(id) - filesystem := d.zfsPath(id) - options := label.FormatMountLabel("", mountLabel) - logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { - return "", err - } - - err = mount.Mount(filesystem, mountpoint, "zfs", options) - if err != nil { - return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) - } - - return mountpoint, nil -} - -// Put removes the existing mountpoint for the given id if it exists. -func (d *Driver) Put(id string) error { - mountpoint := d.mountPath(id) - logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) - - if err := mount.Unmount(mountpoint); err != nil { - return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) - } - return nil -} - -// Exists checks to see if the cache entry exists for the given id. -func (d *Driver) Exists(id string) bool { - return d.filesystemsCache[d.zfsPath(id)] == true -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go deleted file mode 100644 index 1c05fa79..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go +++ /dev/null @@ -1,38 +0,0 @@ -package zfs - -import ( - "fmt" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] - if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } - - return id[:maxlen] -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go deleted file mode 100644 index 52ed5160..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -package zfs - -import ( - "fmt" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go deleted file mode 100644 index 643b169b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd - -package zfs - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest deleted file mode 100644 index a1f02a62..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 2, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest deleted file mode 100644 index beec19a8..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest +++ /dev/null @@ -1,46 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "fsLayers": [ - { - "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest b/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest deleted file mode 100644 index b107de32..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - 
"header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md b/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md deleted file mode 100644 index f2c29155..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/image/spec/v1.md +++ /dev/null @@ -1,573 +0,0 @@ -# Docker Image Specification v1.0.0 - -An *Image* is an ordered collection of root filesystem changes and the -corresponding execution parameters for use within a container runtime. This -specification outlines the format of these filesystem changes and corresponding -parameters and describes how to create and use them for use with a container -runtime and execution tool. - -## Terminology - -This specification uses the following terms: - -
-
- Layer -
-
- Images are composed of layers. "Image layer" is a general - term which may be used to refer to one or both of the following: -
-    1. The metadata for the layer, described in the JSON format.
-    2. The filesystem changes described by a layer.
- - To refer to the former you may use the term Layer JSON or - Layer Metadata. To refer to the latter you may use the term - Image Filesystem Changeset or Image Diff. -
-
- Image JSON -
-
- Each layer has an associated JSON structure which describes some - basic information about the image such as date created, author, and the - ID of its parent image as well as execution/runtime configuration like - its entry point, default arguments, CPU/memory shares, networking, and - volumes. -
-
- Image Filesystem Changeset -
-
- Each layer has an archive of the files which have been added, changed, - or deleted relative to its parent layer. Using a layer-based or union - filesystem such as AUFS, or by computing the diff from filesystem - snapshots, the filesystem changeset can be used to present a series of - image layers as if they were one cohesive filesystem. -
-
- Image ID -
-
- Each layer is given an ID upon its creation. It is - represented as a hexadecimal encoding of 256 bits, e.g., - a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. - Image IDs should be sufficiently random so as to be globally unique. - 32 bytes read from /dev/urandom is sufficient for all - practical purposes. Alternatively, an image ID may be derived as a - cryptographic hash of image contents, as the result is considered - indistinguishable from random. The choice is left up to implementors - (see the sketch at the end of this terminology list). -
-
- Image Parent -
-
- Most layer metadata structs contain a parent field which - refers to the Image from which another directly descends. An image - contains a separate JSON metadata file and set of changes relative to - the filesystem of its parent image. Image Ancestor and - Image Descendant are also common terms. -
-
- Image Checksum -
-
- Layer metadata structs contain a cryptographic hash of the contents of - the layer's filesystem changeset. Though the set of changes exists as a - simple Tar archive, two archives with identical filenames and content - will have different SHA digests if the last-access or last-modified - times of any entries differ. For this reason, image checksums are - generated using the TarSum algorithm which produces a cryptographic - hash of file contents and selected headers only. Details of this - algorithm are described in the separate TarSum specification. -
-
- Tag -
-
- A tag serves to map a descriptive, user-given name to any single image - ID. An image name suffix (the name component after :) is - often referred to as a tag as well, though it strictly refers to the - full name of an image. Acceptable values for a tag suffix are - implementation specific, but they SHOULD be limited to the set of - alphanumeric characters [a-zA-Z0-9] and punctuation - characters [._-], and MUST NOT contain a : - character. -
-
- Repository -
-
- A collection of tags grouped under a common prefix (the name component - before :). For example, in an image tagged with the name - my-app:3.1.4, my-app is the Repository - component of the name. Acceptable values for repository name are - implementation specific, but they SHOULD be limited to the set of - alphanumeric characters [a-zA-Z0-9] and punctuation - characters [._-]; however, it MAY contain additional - / and : characters for organizational - purposes, with the last : character being interpreted as - dividing the repository component of the name from the tag suffix - component. -
-
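To make the Image ID definition above concrete, here is a minimal, self-contained Go sketch. It is not part of the vendored sources, and the helper name newImageID is invented for illustration; it simply generates a random 256-bit, hex-encoded ID as the definition suggests:

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newImageID returns a random 256-bit image ID, hex encoded, following
// the "Image ID" definition above (32 bytes of cryptographic randomness).
func newImageID() (string, error) {
	b := make([]byte, 32) // 256 bits
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	id, err := newImageID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // e.g. a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9
}
```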
-
-## Image JSON Description
-
-Here is an example image JSON file:
-
-```
-{
-    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
-    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
-    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
-    "created": "2014-10-13T21:19:18.674353812Z",
-    "author": "Alyssa P. Hacker <alyspdev@example.com>",
-    "architecture": "amd64",
-    "os": "linux",
-    "Size": 271828,
-    "config": {
-        "User": "alice",
-        "Memory": 2048,
-        "MemorySwap": 4096,
-        "CpuShares": 8,
-        "ExposedPorts": {
-            "8080/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "FOO=docker_is_a_really",
-            "BAR=great_tool_you_know"
-        ],
-        "Entrypoint": [
-            "/bin/my-app-binary"
-        ],
-        "Cmd": [
-            "--foreground",
-            "--config",
-            "/etc/my-app.d/default.cfg"
-        ],
-        "Volumes": {
-            "/var/job-result-data": {},
-            "/var/log/my-app-logs": {}
-        },
-        "WorkingDir": "/home/alice"
-    }
-}
-```
-
-### Image JSON Field Descriptions
-
-
- id string -
-
- Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies - the image. -
-
- parent string -
-
- ID of the parent image. If there is no parent image, then this field - should be omitted. A collection of images may share many of the same - ancestor layers. This organizational structure is strictly a tree, with - any one layer having either no parent or a single parent and zero or - more descendant layers. Cycles are not allowed, and implementations - should be careful to avoid creating them or iterating through a cycle - indefinitely. -
-
- created string -
-
- ISO-8601 formatted combined date and time at which the image was - created. -
-
- author string -
-
- Gives the name and/or email address of the person or entity which - created and is responsible for maintaining the image. -
-
- architecture string -
-
- The CPU architecture which the binaries in this image are built to run - on. Possible values include: -
-    * 386
-    * amd64
-    * arm
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- os string -
-
- The name of the operating system which the image is built to run on. - Possible values include: -
-    * darwin
-    * freebsd
-    * linux
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- checksum string -
-
- Image Checksum of the filesystem changeset associated with the image - layer. -
-
- Size integer -
-
- The size in bytes of the filesystem changeset associated with the image - layer. -
-
- config struct -
-
- The execution parameters which should be used as a base when running a - container using the image. This field can be null, in - which case any execution parameters should be specified at creation of - the container. -
-
- Container RunConfig Field Descriptions -
-
- User string -
-
- The username or UID which the process in the container should - run as. This acts as a default value to use when the value is - not specified when creating a container. - - All of the following are valid: -
-    * user
-    * uid
-    * user:group
-    * uid:gid
-    * uid:group
-    * user:gid
-
- If group/gid is not specified, the - default group and supplementary groups of the given - user/uid in /etc/passwd - from the container are applied. -
-
-
- Memory integer -
-
- Memory limit (in bytes). This acts as a default value to use - when the value is not specified when creating a container. -
-
- MemorySwap integer -
-
- Total memory usage (memory + swap); set to -1 to - disable swap. This acts as a default value to use when the - value is not specified when creating a container. -
-
- CpuShares integer -
-
- CPU shares (relative weight vs. other containers). This acts as - a default value to use when the value is not specified when - creating a container. -
-
- ExposedPorts struct -
-
- A set of ports to expose from a container running this image. - This JSON structure value is unusual because it is a direct - JSON serialization of the Go type - map[string]struct{} and is represented in JSON as - an object mapping its keys to an empty object (a small Go - sketch of this serialization appears after this field list). - Here is an example: -
-{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}
- - Its keys can be in the format of: -
-    * "port/tcp"
-    * "port/udp"
-    * "port"
- with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified - when creating a container. -
-
- Env array of strings -
-
- Entries are in the format of VARNAME="var value". - These values act as defaults and are merged with any specified - when creating a container. -
-
- Entrypoint array of strings -
-
- A list of arguments to use as the command to execute when the - container starts. This value acts as a default and is replaced - by an entrypoint specified when creating a container. -
-
- Cmd array of strings -
-
- Default arguments to the entry point of the container. These - values act as defaults and are replaced with any specified when - creating a container. If an Entrypoint value is - not specified, then the first entry of the Cmd - array should be interpreted as the executable to run. -
-
- Volumes struct -
-
- A set of directories which should be created as data volumes in - a container running this image. This JSON structure value is - unusual because it is a direct JSON serialization of the Go - type map[string]struct{} and is represented in - JSON as an object mapping its keys to an empty object. Here is - an example: -
-{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {}
-}
-
-
- WorkingDir string -
-
- Sets the current working directory of the entry point process - in the container. This value acts as a default and is replaced - by a working directory specified when creating a container. -
-
-
-
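Both ExposedPorts and Volumes rely on the map[string]struct{} serialization described in the field list above. The following standalone Go sketch (not part of the vendored code; the PortSet type name is invented for illustration) shows how encoding/json renders such a set as an object whose keys map to empty objects:

```
package main

import (
	"encoding/json"
	"fmt"
)

// PortSet mirrors the unusual map[string]struct{} shape used by
// ExposedPorts and Volumes: the keys carry all the information, and
// each value serializes to an empty JSON object.
type PortSet map[string]struct{}

func main() {
	ports := PortSet{
		"8080":     {},
		"53/udp":   {},
		"2356/tcp": {},
	}
	out, err := json.Marshal(ports)
	if err != nil {
		panic(err)
	}
	// encoding/json sorts map keys, so this prints:
	// {"2356/tcp":{},"53/udp":{},"8080":{}}
	fmt.Println(string(out))
}
```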
-
-Any extra fields in the Image JSON struct are considered implementation
-specific and should be ignored by any implementations which are unable to
-interpret them.
-
-## Creating an Image Filesystem Changeset
-
-An example of creating an Image Filesystem Changeset follows.
-
-An image root filesystem is first created as an empty directory named with the
-ID of the image being created. Here is the initial empty directory structure
-for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
-longer](#id_desc), but this example uses a truncated one here for brevity.
-Implementations need not name the rootfs directory in this way, but it may be
-convenient for keeping a record of a large number of image layers.):
-
-```
-c3167915dc9d/
-```
-
-Files and directories are then created:
-
-```
-c3167915dc9d/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-The `c3167915dc9d` directory is then committed as a plain Tar archive with
-entries for the following files:
-
-```
-etc/my-app-config
-bin/my-app-binary
-bin/my-app-tools
-```
-
-The TarSum checksum for the archive file is then computed and placed in the
-JSON metadata along with the execution parameters.
-
-To make changes to the filesystem of this container image, create a new
-directory named with a new ID, such as `f60c56784b83`, and initialize it with
-a snapshot of the parent image's root filesystem, so that the directory is
-identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
-can make this very efficient:
-
-```
-f60c56784b83/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This example change is going to add a configuration directory at `/etc/my-app.d`
-which contains a default config file. There's also a change to the
-`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
-directory then looks like this:
-
-```
-f60c56784b83/
-    etc/
-        my-app.d/
-            default.cfg
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This reflects the removal of `/etc/my-app-config` and creation of a file and
-directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
-replaced with an updated version. Before committing this directory to a
-changeset, because it has a parent image, it is first compared with the
-directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
-directories that have been added, modified, or removed. The following changeset
-is found:
-
-```
-Added:    /etc/my-app.d/default.cfg
-Modified: /bin/my-app-tools
-Deleted:  /etc/my-app-config
-```
-
-A Tar Archive is then created which contains *only* this changeset: the added
-and modified files and directories in their entirety, and for each deleted item
-an entry for an empty file at the same location but with the basename of the
-deleted file or directory prefixed with `.wh.`. The filenames prefixed with
-`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
-to create an image root filesystem which contains a file or directory with a
-name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
-the following entries:
-
-```
-/etc/my-app.d/default.cfg
-/bin/my-app-tools
-/etc/.wh.my-app-config
-```
-
-Any given image is likely to be composed of several of these Image Filesystem
-Changeset tar archives.
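As a rough sketch of the changeset construction just described, the following self-contained Go program (not the vendored implementation; writeChangeset and its inputs are invented for this example) writes the added and modified files plus an empty `.wh.`-prefixed whiteout entry for each deletion, using only the standard archive/tar package:

```
package main

import (
	"archive/tar"
	"os"
	"path"
)

// writeChangeset emits a tar stream containing only a layer's changes:
// added or modified files in their entirety, plus an empty ".wh."-prefixed
// entry for each deleted path, as the spec text above describes.
func writeChangeset(w *tar.Writer, addedOrModified map[string][]byte, deleted []string) error {
	for name, content := range addedOrModified {
		hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(content))}
		if err := w.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := w.Write(content); err != nil {
			return err
		}
	}
	for _, name := range deleted {
		// Whiteout: an empty file whose basename is prefixed with ".wh.".
		wh := path.Join(path.Dir(name), ".wh."+path.Base(name))
		if err := w.WriteHeader(&tar.Header{Name: wh, Mode: 0644, Size: 0}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()
	changes := map[string][]byte{
		"etc/my-app.d/default.cfg": []byte("# default config\n"),
		"bin/my-app-tools":         []byte("updated tool"),
	}
	if err := writeChangeset(tw, changes, []string{"etc/my-app-config"}); err != nil {
		panic(err)
	}
}
```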
-
-## Combined Image JSON + Filesystem Changeset Format
-
-There is also a format for a single archive which contains complete information
-about an image, including:
-
- - repository names/tags
- - all image layer JSON files
- - all tar archives of each layer's filesystem changeset
-
-For example, here's what the full archive of `library/busybox` is (displayed in
-`tree` format):
-
-```
-.
-├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-└── repositories
-```
-
-There are one or more directories named with the ID for each layer in a full
-image. Each of these directories contains 3 files:
-
- * `VERSION` - The schema version of the `json` file
- * `json` - The JSON metadata for an image layer
- * `layer.tar` - The Tar archive of the filesystem changeset for an image
-   layer.
-
-The content of the `VERSION` files is simply the semantic version of the JSON
-metadata schema:
-
-```
-1.0
-```
-
-And the `repositories` file is another JSON file which describes names/tags:
-
-```
-{
-    "busybox":{
-        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
-    }
-}
-```
-
-Every key in this object is the name of a repository, and maps to a collection
-of tag suffixes. Each tag maps to the ID of the image represented by that tag.
-
-## Loading an Image Filesystem Changeset
-
-Unpacking a bundle of image layer JSON files and their corresponding filesystem
-changesets can be done using a series of steps:
-
-1. Follow the parent IDs of image layers to find the root ancestor (an image
-with no parent ID specified).
-2. For every image layer, in order from root ancestor and descending down,
-extract the contents of that layer's filesystem changeset archive into a
-directory which will be used as the root of a container filesystem.
-
-    - Extract all contents of each archive.
-    - Walk the directory tree once more, removing any files with the prefix
-      `.wh.` and the corresponding file or directory named without this prefix.
-
-## Implementations
-
-This specification is an admittedly imperfect description of an
-imperfectly-understood problem. The Docker project is, in turn, an attempt to
-implement this specification. Our goal and our execution toward it will evolve
-over time, but our primary concern in this specification and in our
-implementation is compatibility and interoperability.
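The whiteout handling in step 2 of the loading procedure can be sketched in Go roughly as follows. The applyWhiteouts helper and the rootDir argument are assumptions made for illustration; a real loader would also need to guard against malicious paths in the archive (much as the vendored load code does with symlink.FollowSymlinkInScope):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// applyWhiteouts implements the second walk of the loading procedure
// sketched above: after a layer's archive has been extracted over rootDir,
// every ".wh."-prefixed marker and the file or directory it shadows are
// removed.
func applyWhiteouts(rootDir string) error {
	var markers []string
	err := filepath.Walk(rootDir, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if strings.HasPrefix(filepath.Base(p), ".wh.") {
			markers = append(markers, p)
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Delete markers and their targets after the walk so we never remove
	// directories out from under filepath.Walk.
	for _, m := range markers {
		target := filepath.Join(filepath.Dir(m), strings.TrimPrefix(filepath.Base(m), ".wh."))
		if err := os.RemoveAll(target); err != nil {
			return err
		}
		if err := os.Remove(m); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := applyWhiteouts("/tmp/container-rootfs"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```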
diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go deleted file mode 100644 index 9724c5ef..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/load.go +++ /dev/null @@ -1,289 +0,0 @@ -package tarexport - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/reference" -) - -func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error { - tmpDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { - return err - } - // read manifest, if no file then load in legacy mode - manifestPath, err := safePath(tmpDir, manifestFileName) - if err != nil { - return err - } - manifestFile, err := os.Open(manifestPath) - if err != nil { - if os.IsNotExist(err) { - return l.legacyLoad(tmpDir, outStream) - } - return manifestFile.Close() - } - defer manifestFile.Close() - - var manifest []manifestItem - if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { - return err - } - - for _, m := range manifest { - configPath, err := safePath(tmpDir, m.Config) - if err != nil { - return err - } - config, err := ioutil.ReadFile(configPath) - if err != nil { - return err - } - img, err := image.NewFromJSON(config) - if err != nil { - return err - } - var rootFS image.RootFS - rootFS = *img.RootFS - rootFS.DiffIDs = nil - - if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { - return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) - } - - for i, diffID := range img.RootFS.DiffIDs { - layerPath, err := safePath(tmpDir, m.Layers[i]) - if err != nil { - return err - } - r := rootFS - r.Append(diffID) - newLayer, err := l.ls.Get(r.ChainID()) - if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS) - if err != nil { - return err - } - } - defer layer.ReleaseAndLog(l.ls, newLayer) - if expected, actual := diffID, newLayer.DiffID(); expected != actual { - return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) - } - rootFS.Append(diffID) - } - - imgID, err := l.is.Create(config) - if err != nil { - return err - } - - for _, repoTag := range m.RepoTags { - named, err := reference.ParseNamed(repoTag) - if err != nil { - return err - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid tag %q", repoTag) - } - l.setLoadedTag(ref, imgID, outStream) - } - - } - - return nil -} - -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) { - rawTar, err := os.Open(filename) - if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) - return nil, err - } - defer rawTar.Close() - - inflatedLayerData, err := archive.DecompressStream(rawTar) - if err != nil { - return nil, err - } - defer inflatedLayerData.Close() - - return l.ls.Register(inflatedLayerData, rootFS.ChainID()) -} - -func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { - if prevID, err := l.rs.Get(ref); err == nil && 
prevID != imgID { - fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags - } - - if err := l.rs.AddTag(ref, imgID, true); err != nil { - return err - } - return nil -} - -func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer) error { - legacyLoadedMap := make(map[string]image.ID) - - dirs, err := ioutil.ReadDir(tmpDir) - if err != nil { - return err - } - - // every dir represents an image - for _, d := range dirs { - if d.IsDir() { - if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap); err != nil { - return err - } - } - } - - // load tags from repositories file - repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) - if err != nil { - return err - } - repositoriesFile, err := os.Open(repositoriesPath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - return repositoriesFile.Close() - } - defer repositoriesFile.Close() - - repositories := make(map[string]map[string]string) - if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { - return err - } - - for name, tagMap := range repositories { - for tag, oldID := range tagMap { - imgID, ok := legacyLoadedMap[oldID] - if !ok { - return fmt.Errorf("invalid target ID: %v", oldID) - } - named, err := reference.WithName(name) - if err != nil { - return err - } - ref, err := reference.WithTag(named, tag) - if err != nil { - return err - } - l.setLoadedTag(ref, imgID, outStream) - } - } - - return nil -} - -func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID) error { - if _, loaded := loadedMap[oldID]; loaded { - return nil - } - configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) - if err != nil { - return err - } - imageJSON, err := ioutil.ReadFile(configPath) - if err != nil { - logrus.Debugf("Error reading json: %v", err) - return err - } - - var img struct{ Parent string } - if err := json.Unmarshal(imageJSON, &img); err != nil { - return err - } - - var parentID image.ID - if img.Parent != "" { - for { - var loaded bool - if parentID, loaded = loadedMap[img.Parent]; !loaded { - if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap); err != nil { - return err - } - } else { - break - } - } - } - - // todo: try to connect with migrate code - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := l.is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) - if err != nil { - return err - } - newLayer, err := l.loadLayer(layerPath, *rootFS) - if err != nil { - return err - } - rootFS.Append(newLayer.DiffID()) - - h, err := v1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - imgID, err := l.is.Create(config) - if err != nil { - return err - } - - metadata, err := l.ls.Release(newLayer) - layer.LogReleaseMetadata(metadata) - if err != nil { - return err - } - - if parentID != "" { - if err := l.is.SetParent(imgID, parentID); err != nil { - return err - } - } - - loadedMap[oldID] = imgID - return nil -} - -func safePath(base, path string) (string, error) { - return 
symlink.FollowSymlinkInScope(filepath.Join(base, path), base) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go deleted file mode 100644 index b7022ac5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/save.go +++ /dev/null @@ -1,300 +0,0 @@ -package tarexport - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/reference" -) - -type imageDescriptor struct { - refs []reference.NamedTagged - layers []string -} - -type saveSession struct { - *tarexporter - outDir string - images map[image.ID]*imageDescriptor - savedLayers map[string]struct{} -} - -func (l *tarexporter) Save(names []string, outStream io.Writer) error { - images, err := l.parseNames(names) - if err != nil { - return err - } - - return (&saveSession{tarexporter: l, images: images}).save(outStream) -} - -func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { - imgDescr := make(map[image.ID]*imageDescriptor) - - addAssoc := func(id image.ID, ref reference.Named) { - if _, ok := imgDescr[id]; !ok { - imgDescr[id] = &imageDescriptor{} - } - - if ref != nil { - var tagged reference.NamedTagged - if _, ok := ref.(reference.Canonical); ok { - return - } - var ok bool - if tagged, ok = ref.(reference.NamedTagged); !ok { - var err error - if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { - return - } - } - - for _, t := range imgDescr[id].refs { - if tagged.String() == t.String() { - return - } - } - imgDescr[id].refs = append(imgDescr[id].refs, tagged) - } - } - - for _, name := range names { - ref, err := reference.ParseNamed(name) - if err != nil { - return nil, err - } - if ref.Name() == string(digest.Canonical) { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - continue - } - if reference.IsNameOnly(ref) { - assocs := l.rs.ReferencesByName(ref) - for _, assoc := range assocs { - addAssoc(assoc.ImageID, assoc.Ref) - } - if len(assocs) == 0 { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - } - continue - } - var imgID image.ID - if imgID, err = l.rs.Get(ref); err != nil { - return nil, err - } - addAssoc(imgID, ref) - - } - return imgDescr, nil -} - -func (s *saveSession) save(outStream io.Writer) error { - s.savedLayers = make(map[string]struct{}) - - // get image json - tempDir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - - s.outDir = tempDir - reposLegacy := make(map[string]map[string]string) - - var manifest []manifestItem - - for id, imageDescr := range s.images { - if err = s.saveImage(id); err != nil { - return err - } - - var repoTags []string - var layers []string - - for _, ref := range imageDescr.refs { - if _, ok := reposLegacy[ref.Name()]; !ok { - reposLegacy[ref.Name()] = make(map[string]string) - } - reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] - repoTags = append(repoTags, ref.String()) - } - - for _, l := range imageDescr.layers { - layers = append(layers, filepath.Join(l, legacyLayerFileName)) - } - - manifest = append(manifest, manifestItem{ - Config: 
digest.Digest(id).Hex() + ".json", - RepoTags: repoTags, - Layers: layers, - }) - } - - if len(reposLegacy) > 0 { - reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) - f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := os.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - } - - manifestFileName := filepath.Join(tempDir, manifestFileName) - f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(manifest); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := os.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - - fs, err := archive.Tar(tempDir, archive.Uncompressed) - if err != nil { - return err - } - defer fs.Close() - - if _, err := io.Copy(outStream, fs); err != nil { - return err - } - return nil -} - -func (s *saveSession) saveImage(id image.ID) error { - img, err := s.is.Get(id) - if err != nil { - return err - } - - if len(img.RootFS.DiffIDs) == 0 { - return fmt.Errorf("empty export - not implemented") - } - - var parent digest.Digest - var layers []string - for i := range img.RootFS.DiffIDs { - v1Img := image.V1Image{} - if i == len(img.RootFS.DiffIDs)-1 { - v1Img = img.V1Image - } - rootFS := *img.RootFS - rootFS.DiffIDs = rootFS.DiffIDs[:i+1] - v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) - if err != nil { - return err - } - - v1Img.ID = v1ID.Hex() - if parent != "" { - v1Img.Parent = parent.Hex() - } - - if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { - return err - } - layers = append(layers, v1Img.ID) - parent = v1ID - } - - configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") - if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { - return err - } - if err := os.Chtimes(configFile, img.Created, img.Created); err != nil { - return err - } - - s.images[id].layers = layers - return nil -} - -func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { - if _, exists := s.savedLayers[legacyImg.ID]; exists { - return nil - } - - outDir := filepath.Join(s.outDir, legacyImg.ID) - if err := os.Mkdir(outDir, 0755); err != nil { - return err - } - - // todo: why is this version file here? - if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { - return err - } - - imageConfig, err := json.Marshal(legacyImg) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { - return err - } - - // serialize filesystem - tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) - if err != nil { - return err - } - defer tarFile.Close() - - l, err := s.ls.Get(id) - if err != nil { - return err - } - defer layer.ReleaseAndLog(s.ls, l) - - arch, err := l.TarStream() - if err != nil { - return err - } - defer arch.Close() - - if _, err := io.Copy(tarFile, arch); err != nil { - return err - } - - for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { - // todo: maybe save layer created timestamp? 
- if err := os.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { - return err - } - } - - s.savedLayers[legacyImg.ID] = struct{}{} - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go b/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go deleted file mode 100644 index cc8cdc85..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/image/tarexport/tarexport.go +++ /dev/null @@ -1,36 +0,0 @@ -package tarexport - -import ( - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" -) - -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -type manifestItem struct { - Config string - RepoTags []string - Layers []string -} - -type tarexporter struct { - is image.Store - ls layer.Store - rs reference.Store -} - -// NewTarExporter returns new ImageExporter for tar packages -func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter { - return &tarexporter{ - is: is, - ls: ls, - rs: rs, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go deleted file mode 100644 index 2e766dd1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "fmt" - - flag "github.com/docker/docker/pkg/mflag" -) - -var ( - i int - str string - b, b2, h bool -) - -func init() { - flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") - flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") - flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") - flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") - flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") - flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage - flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") - flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") - flag.Parse() -} -func main() { - if h { - flag.PrintDefaults() - } else { - fmt.Printf("s/#hidden/-string: %s\n", str) - fmt.Printf("b: %t\n", b) - fmt.Printf("-bool: %t\n", b2) - fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) - fmt.Printf("ARGS: %v\n", flag.Args()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 3bf2b2b6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,257 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. 
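(Editor's aside on the three tarexport files deleted above: together they implement the `docker save`/`docker load` archive format, in which a top-level manifest.json lists, per image, a config JSON file and an ordered set of layer.tar paths, while the per-layer directories carry the legacy json/VERSION/layer.tar trio for pre-1.10 compatibility. Below is a minimal sketch of reading such a manifest outside the daemon; the manifestItem shape is copied from tarexport.go, but the directory name and everything else here are illustrative assumptions, not part of the original code.)

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// manifestItem mirrors the struct in tarexport.go above.
type manifestItem struct {
	Config   string   // e.g. "<image ID>.json"
	RepoTags []string // e.g. ["busybox:latest"]
	Layers   []string // e.g. ["<v1 ID>/layer.tar", ...], base layer first
}

func main() {
	// dir is assumed to hold an already-unpacked `docker save` archive.
	dir := "unpacked-image"

	f, err := os.Open(filepath.Join(dir, "manifest.json"))
	if err != nil {
		panic(err) // a missing manifest.json would indicate a legacy (pre-1.10) archive
	}
	defer f.Close()

	var manifest []manifestItem
	if err := json.NewDecoder(f).Decode(&manifest); err != nil {
		panic(err)
	}

	for _, m := range manifest {
		fmt.Printf("config=%s tags=%v\n", m.Config, m.RepoTags)
		for i, l := range m.Layers {
			fmt.Printf("  layer %d: %s\n", i, l)
		}
	}
}
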
-type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -func newAnsiReader(nFile int) *ansiReader { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[winterm.WORD]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[winterm.WORD]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
-func formatVirtualKey(key winterm.WORD, controlState winterm.DWORD, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState winterm.DWORD) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 9f3232c0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build windows - -package windows - -import ( - "io/ioutil" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -func newAnsiWriter(nFile int) *ansiWriter { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index 3711d988..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" -) - -// ConsoleStreams returns a wrapped version for each standard stream referencing a console, -// that handles ANSI character sequences. -func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if IsConsole(os.Stdout.Fd()) { - stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if IsConsole(os.Stderr.Fd()) { - stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index bf4c7b50..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e3325..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
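(Editor's aside: a minimal sketch of how a Windows client might consume ConsoleStreams from console.go above. The import alias, the demo strings, and the buffer size are mine; this assumes the package builds as github.com/docker/docker/pkg/term/windows and is not part of the deleted code.)

// +build windows

package main

import (
	"fmt"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	// Each returned stream is either the raw os.Std* file or, when the
	// descriptor is a real Windows console, an ANSI-translating wrapper.
	stdin, stdout, stderr := windowsconsole.ConsoleStreams()

	// ANSI sequences written here are converted into Console API calls by the
	// ansiWriter parser whenever stdout is a console.
	fmt.Fprintln(stdout, "\x1b[1mbold when stdout is a console\x1b[0m")

	// Key presses read here arrive as ANSI byte sequences (arrow keys,
	// function keys, etc.) courtesy of ansiReader.
	buf := make([]byte, 64)
	if n, err := stdin.Read(buf); err == nil {
		fmt.Fprintf(stderr, "read % x\n", buf[:n])
	}
}
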
- diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e3325..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e3325..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e3325..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE deleted file mode 100644 index bc756ae3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright 2012 SocialCode - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE deleted file mode 100644 index 9269338f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/RackSec/srslog/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015 Rackspace. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77fe..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
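As a concrete illustration of the appendix above: in a Go project such as this one, the boilerplate notice is placed in a comment block at the top of each source file, since line comments are the appropriate comment syntax for that file format. This is a minimal sketch; the year, owner, file, and package names below are hypothetical placeholders chosen only to show the placement, and are not taken from any file in this diff.

    // Copyright 2016 Example Author
    // (hypothetical year and owner; substitute your own identifying
    // information in place of the bracketed fields from the appendix)
    //
    // Licensed under the Apache License, Version 2.0 (the "License");
    // you may not use this file except in compliance with the License.
    // You may obtain a copy of the License at
    //
    //     http://www.apache.org/licenses/LICENSE-2.0
    //
    // Unless required by applicable law or agreed to in writing, software
    // distributed under the License is distributed on an "AS IS" BASIS,
    // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    // See the License for the specific language governing permissions and
    // limitations under the License.

    // Package example is a hypothetical placeholder demonstrating where
    // the license header sits relative to the package clause.
    package example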
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE deleted file mode 100644 index b5768f89..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d2081..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code deleted file mode 100644 index 9e4bd4db..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs deleted file mode 100644 index e26cd4fc..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. 
Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. 
Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. 
Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. 
If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. 
Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE deleted file mode 100644 index e06d2081..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE deleted file mode 100644 index 6daf85e9..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE deleted file mode 100644 index c8476ac0..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sameer Naik - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE deleted file mode 100644 index d92ae9ee..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/tuf/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2015, Docker Inc. -Copyright (c) 2014-2015 Prime Directive, Inc. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Prime Directive, Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE deleted file mode 100644 index 1aa91de6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2013 Tatsuo Kaniwa - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE deleted file mode 100644 index 545cf2d3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE deleted file mode 100644 index 670d88fc..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013, Georg Reinke (), Google -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
“Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md deleted file mode 100644 index ac2c0646..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -* The text contents of this website are MPL 2.0 licensed. - -* The design contents of this website are proprietary and may not be reproduced - or reused in any way other than to run the Consul website locally. The license - for the design is owned solely by HashiCorp, Inc. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE deleted file mode 100644 index ccae99f6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012, 2013 Ugorji Nwoke. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the author nor the names of its contributors may be used - to endorse or promote products derived from this software - without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. 
- -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md deleted file mode 100644 index 36c29d7f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -* The text contents of this website are MPL 2.0 licensed. - -* The design contents of this website are proprietary and may not be reproduced - or reused in any way other than to run the Serf website locally. The license - for the design is owned solely by HashiCorp, Inc. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 68668029..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/jfrazelle/go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License deleted file mode 100644 index 6b7558b6..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2011 Keith Rarick - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall -be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE deleted file mode 100644 index ca458bb3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE deleted file mode 100644 index b8b569d7..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT deleted file mode 100644 index 35702b10..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/COPYRIGHT +++ /dev/null @@ -1,9 +0,0 @@ -Copyright 2009 The Go Authors. All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben - -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. - -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE deleted file mode 100644 index ce25d13a..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/miekg/pkcs11/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Miek Gieben. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Miek Gieben nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE deleted file mode 100644 index f4c265cf..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2014, OmniTI Computer Consulting, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE deleted file mode 100644 index bc00498c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013, Samuel Stauffer -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -* Neither the name of the author nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE deleted file mode 100644 index 81cf60de..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/seccomp/libseccomp-golang/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015 Matthew Heon -Copyright (c) 2015 Paul Moore -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -- Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE deleted file mode 100644 index 80dd96de..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2013 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE deleted file mode 100644 index e50d398e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 The AUTHORS - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE deleted file mode 100644 index 14d60424..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2014 Philip Hofer -Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE deleted file mode 100644 index 95a0f054..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE deleted file mode 100644 index 968b4538..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2013 Vaughan Newton - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE deleted file mode 100644 index 9f64db85..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE +++ /dev/null @@ -1,192 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Vishvananda Ishaya. - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE deleted file mode 100644 index 9f64db85..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE +++ /dev/null @@ -1,192 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Vishvananda Ishaya. - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE deleted file mode 100644 index f4988b45..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2014, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE deleted file mode 100644 index f21e5408..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE b/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE deleted file mode 100644 index c157bff9..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
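The next three hunks drop the vendored engine-api helpers for event types, image pull behavior, and `--since`/`--until` timestamp handling. For readers tracing what is being removed, here is a minimal usage sketch of those APIs as they appear in the deleted sources below; the import paths are the old vendored ones from the hunks and will not resolve in a modern module build, and the output comments are inferred from the deleted code rather than tested:

```
package main

import (
	"fmt"
	"time"

	"github.com/docker/engine-api/types/image"
	// The deleted package is itself named "time"; alias it to avoid the stdlib clash.
	apitime "github.com/docker/engine-api/types/time"
)

func main() {
	// ParsePullBehavior maps CLI-style strings onto the PullBehavior enum;
	// the empty string deliberately defaults to "missing".
	pb, err := image.ParsePullBehavior("")
	if err != nil {
		panic(err)
	}
	fmt.Println(pb) // "missing", via PullBehavior.String

	// GetTimestamp resolves a duration, an RFC3339-style stamp, or a raw
	// Unix timestamp against a reference time; durations are subtracted
	// from the reference, so "10m" means "ten minutes ago".
	ts, err := apitime.GetTimestamp("10m", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(ts) // Unix seconds as a string

	// ParseTimestamps splits GetTimestamp's "sec.nsec" output back into
	// integers suitable for time.Unix.
	sec, nsec, err := apitime.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(sec, nsec).UTC())
}
```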
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go deleted file mode 100644 index c5987aaf..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go +++ /dev/null @@ -1,38 +0,0 @@ -package events - -const ( - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // VolumeEventType is the event type that volumes generate - VolumeEventType = "volume" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels; other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - Type string - Action string - Actor Actor - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go deleted file mode 100644 index d30df2ce..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/image/pull_behavior.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "fmt" -) - -// PullBehavior can be one of: never, always, or missing -type PullBehavior int - -// PullBehavior can be one of: never, always, or missing -const ( - PullNever PullBehavior = iota - PullAlways - PullMissing -) - -// ParsePullBehavior validates and converts a string into a PullBehavior -func ParsePullBehavior(pullVal string) (PullBehavior, error) { - switch pullVal { - case "never": - return PullNever, nil - case "always": - return PullAlways, nil - case "missing", "": - return PullMissing, nil - } - return PullNever, fmt.Errorf("Invalid pull behavior '%s'", pullVal) -} - -// String returns a string representation of a PullBehavior -func (p PullBehavior) String() string { - switch p { - case PullNever: - return "never" - case PullAlways: - return "always" - case PullMissing: - return "missing" - } - return "" -} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go deleted file mode 100644 index d3695ba7..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go +++ /dev/null @@ -1,124 +0,0 @@ -package time - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and
time at 00:00:00 -) - -// GetTimestamp tries to parse the given string as a Go duration, -// then as an RFC3339 time, and finally as a Unix timestamp. If -// any of these succeed, it returns a Unix timestamp -// as a string; otherwise it returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. -func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - var parseInLocation bool - - // if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z), then - // there will be an extra colon in the input for the tz offset, so subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339-like timestamp but the parser failed with an error - } - return value, nil // Unix-timestamp in-and-out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d" (time.Unix(), int64(time.Nanosecond())). -// If the incoming nanosecond portion is longer or shorter than 9 digits, it is -// converted to nanoseconds. The expectation is that the seconds and -// nanoseconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// If value is empty, def (default seconds) is returned for the seconds. -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds, but just in case, convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index 76c516e6..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go deleted file mode 100644 index 4ed43358..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package v1p19 provides specific API types for the API version 1, patch 19. -package v1p19 - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions/v1p20" - "github.com/docker/go-connections/nat" -) - -// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. -// Note this is not used by the Windows daemon. -type ContainerJSON struct { - *types.ContainerJSONBase - Volumes map[string]string - VolumesRW map[string]bool - Config *ContainerConfig - NetworkSettings *v1p20.NetworkSettings -} - -// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
-type ContainerConfig struct { - *container.Config - - MacAddress string - NetworkDisabled bool - ExposedPorts map[nat.Port]struct{} - - // backward compatibility, they now live in HostConfig - VolumeDriver string - Memory int64 - MemorySwap int64 - CPUShares int64 `json:"CpuShares"` - CPUSet string `json:"Cpuset"` -} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go deleted file mode 100644 index ed800061..00000000 --- a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package v1p20 provides specific API types for the API version 1, patch 20. -package v1p20 - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-connections/nat" -) - -// ContainerJSON is a backcompatibility struct for the API 1.20 -type ContainerJSON struct { - *types.ContainerJSONBase - Mounts []types.MountPoint - Config *ContainerConfig - NetworkSettings *NetworkSettings -} - -// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 -type ContainerConfig struct { - *container.Config - - MacAddress string - NetworkDisabled bool - ExposedPorts map[nat.Port]struct{} - - // backward compatibility, they now live in HostConfig - VolumeDriver string -} - -// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 -type StatsJSON struct { - types.Stats - Network types.NetworkStats `json:"network,omitempty"` -} - -// NetworkSettings is a backward compatible struct for APIs prior to 1.21 -type NetworkSettings struct { - types.NetworkSettingsBase - types.DefaultNetworkSettings -} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE b/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
-// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d78..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith <joe.smith@email.com> - -Use your real name (sorry, no pseudonyms or anonymous contributions). - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`.
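Just above the CONTRIBUTING.md hunk, the two `tlsconfig` files are removed as a pair: the `+build go1.5` / `+build !go1.5` tags select a wider or narrower client cipher-suite list, presumably because the AES-256-GCM suites were only added to `crypto/tls` in Go 1.5. A rough sketch of how such a preferred-suite list is typically consumed follows; the `clientConfig` helper and the `MinVersion` choice are assumptions for illustration, not part of the deleted package:

```
package main

import (
	"crypto/tls"
	"fmt"
)

// Mirrors the go1.5 variant of the deleted files: ECDHE plus AES-GCM only,
// with the CBC suites deliberately left out of the client-preferred set.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

// clientConfig is a hypothetical helper showing where such a list plugs in.
func clientConfig() *tls.Config {
	return &tls.Config{
		MinVersion:   tls.VersionTLS12,
		CipherSuites: clientCipherSuites,
	}
}

func main() {
	fmt.Printf("client offers %d cipher suites\n", len(clientConfig().CipherSuites))
}
```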
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code deleted file mode 100644 index b55b37bc..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs deleted file mode 100644 index e26cd4fc..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). 
To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. 
Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. 
To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. 
- -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. 
To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b2..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/Godeps/_workspace/src/github.com/docker/go-units/README.md b/Godeps/_workspace/src/github.com/docker/go-units/README.md deleted file mode 100644 index 3ce4d79d..00000000 --- a/Godeps/_workspace/src/github.com/docker/go-units/README.md +++ /dev/null @@ -1,18 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human-friendly measurements into machine-friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
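(Not part of the upstream README — purely an illustrative sketch of the human-friendly/machine-friendly conversion this package provides, assuming the `HumanSize`, `FromHumanSize`, and `RAMInBytes` helpers documented in the godoc; exact output formatting may vary between versions.)

```
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Machine-friendly to human-friendly: format a raw byte count
	// using decimal (SI) multiples.
	fmt.Println(units.HumanSize(1000000)) // roughly "1MB"

	// Human-friendly to machine-friendly: parse a size string back
	// into a byte count, again using decimal multiples.
	n, err := units.FromHumanSize("42 MB")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(n) // 42000000

	// RAMInBytes uses binary (1024-based) multiples, which is what
	// memory limits such as "64m" conventionally mean.
	m, err := units.RAMInBytes("64m")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(m) // 67108864
}
```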
- -## Copyright and license - -Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code -is released under the Apache 2.0 license. The README.md file, and files in the -"docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 -International License under the terms and conditions set forth in the file -"LICENSE.docs". You may obtain a duplicate copy of the same license, titled -CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner.
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go deleted file mode 100644 index 89debf6b..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go +++ /dev/null @@ -1,94 +0,0 @@ -package testutil - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "math/big" - "time" -) - -// GenerateTrustCA generates a new certificate authority for testing. -func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "CA Root", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} - -// GenerateIntermediate generates an intermediate certificate for testing using -// the parent certificate (likely a CA) and the provided keys. -func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Intermediate", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} - -// GenerateTrustCert generates a new trust certificate for testing. Unlike the -// intermediate certificates, this certificate should be used for signature -// only, not creating certificates. 
-func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Trust Cert", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageDigitalSignature, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md deleted file mode 100644 index 24124db2..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md +++ /dev/null @@ -1,50 +0,0 @@ -## Libtrust TLS Config Demo - -This program generates key pairs and trust files for a TLS client and server. - -To generate the keys, run: - -``` -$ go run genkeys.go -``` - -The generated files are: - -``` -$ ls -l client_data/ server_data/ -client_data/: -total 24 --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json --rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json - -server_data/: -total 24 --rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json -``` - -The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<HOST>:<PORT>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client. - -To start the server, run: - -``` -$ go run server.go -``` - -This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message. - -To make a request using the client, run: - -``` -$ go run client.go -``` - -This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server. - -The file `gencert.go` can be used to generate a PEM-encoded version of the client key and certificate.
If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). - -``` -curl --cert cert.pem --key key.pem -k https://localhost:8888 -``` diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go deleted file mode 100644 index 0a699a0e..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - // Load Client Key. - clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate Client Certificate. - selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) - if err != nil { - log.Fatal(err) - } - - // Load trusted host keys. - hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - // Ensure the host we want to connect to is trusted! - host, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) - if err != nil { - log.Fatalf("%q is not a known and trusted host", host) - } - - // Generate a CA pool with the trusted host's key. - caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) - if err != nil { - log.Fatal(err) - } - - // Create HTTP Client. - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedClientCert.Raw}, - PrivateKey: clientKey.CryptoPrivateKey(), - Leaf: selfSignedClientCert, - }, - }, - RootCAs: caPool, - }, - }, - } - - var makeRequest = func(url string) { - resp, err := client.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - log.Println(resp.Status) - log.Println(string(body)) - } - - // Make the request to the trusted server! 
- makeRequest(fmt.Sprintf("https://%s", serverAddress)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go deleted file mode 100644 index c65f3b6b..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "encoding/pem" - "fmt" - "log" - "net" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - clientPrivateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) - if err != nil { - log.Fatal(err) - } - - keyPEMBlock, err := key.PEMBlock() - if err != nil { - log.Fatal(err) - } - - encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) - fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) - - cert, err := libtrust.GenerateSelfSignedClientCert(key) - if err != nil { - log.Fatal(err) - } - - encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) - - trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - hostname, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - - trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) - if err != nil { - log.Fatal(err) - } - - caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) - if err != nil { - log.Fatal(err) - } - - encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) - fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go deleted file mode 100644 index 9dc8842a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "log" - - "github.com/docker/libtrust" -) - -func main() { - // Generate client key. - clientKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Add a comment for the client key. - clientKey.AddExtendedField("comment", "TLS Demo Client") - - // Save the client key, public and private versions. - err = libtrust.SaveKey("client_data/private_key.pem", clientKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate server key. - serverKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Set the list of addresses to use for the server. - serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) - - // Save the server key, public and private versions. - err = libtrust.SaveKey("server_data/private_key.pem", serverKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Authorized Keys file for server. - err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Known Host Keys file for client. 
- err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go deleted file mode 100644 index d3cb2ea9..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go +++ /dev/null @@ -1,80 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "html" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "server_data/private_key.pem" - authorizedClientsFilename = "server_data/trusted_clients.pem" -) - -func requestHandler(w http.ResponseWriter, r *http.Request) { - clientCert := r.TLS.PeerCertificates[0] - keyID := clientCert.Subject.CommonName - log.Printf("Request from keyID: %s\n", keyID) - fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID)) -} - -func main() { - // Load server key. - serverKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate server certificate. - selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert( - serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}, - ) - if err != nil { - log.Fatal(err) - } - - // Load authorized client keys. - authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename) - if err != nil { - log.Fatal(err) - } - - // Create CA pool using trusted client keys. - caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients) - if err != nil { - log.Fatal(err) - } - - // Create TLS config, requiring client certificates. - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedServerCert.Raw}, - PrivateKey: serverKey.CryptoPrivateKey(), - Leaf: selfSignedServerCert, - }, - }, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: caPool, - } - - // Create HTTP server with simple request handler. - server := &http.Server{ - Addr: serverAddress, - Handler: http.HandlerFunc(requestHandler), - } - - // Listen and serve HTTPS using the libtrust TLS config. - listener, err := net.Listen("tcp", server.Addr) - if err != nil { - log.Fatal(err) - } - tlsListener := tls.NewListener(listener, tlsConfig) - server.Serve(tlsListener) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go deleted file mode 100644 index 72b0fc36..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go +++ /dev/null @@ -1,50 +0,0 @@ -package trustgraph - -import "github.com/docker/libtrust" - -// TrustGraph represents a graph of authorization mapping -// public keys to nodes and grants between nodes. -type TrustGraph interface { - // Verifies that the given public key is allowed to perform - // the given action on the given node according to the trust - // graph. - Verify(libtrust.PublicKey, string, uint16) (bool, error) - - // GetGrants returns an array of all grant chains which are used to - // allow the requested permission. - GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error) -} - -// Grant represents a transfer of permission from one part of the -// trust graph to another. This is the only way to delegate -// permission between two different sub trees in the graph.
-type Grant struct { - // Subject is the namespace being granted - Subject string - - // Permission is a bit map of permissions - Permission uint16 - - // Grantee represents the node being granted - // a permission scope. The grantee can be - // either a namespace item or a key id where namespace - // items will always start with a '/'. - Grantee string - - // statement represents the statement used to create - // this object. - statement *Statement -} - -// Permissions -// Read node 0x01 (can read node, no sub nodes) -// Write node 0x02 (can write to node object, cannot create subnodes) -// Read subtree 0x04 (delegates read to each sub node) -// Write subtree 0x08 (delegates write to each sub node, included create on the subject) -// -// Permission shortcuts -// ReadItem = 0x01 -// WriteItem = 0x03 -// ReadAccess = 0x07 -// WriteAccess = 0x0F -// Delegate = 0x0F diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go deleted file mode 100644 index 247bfa7a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go +++ /dev/null @@ -1,133 +0,0 @@ -package trustgraph - -import ( - "strings" - - "github.com/docker/libtrust" -) - -type grantNode struct { - grants []*Grant - children map[string]*grantNode -} - -type memoryGraph struct { - roots map[string]*grantNode -} - -func newGrantNode() *grantNode { - return &grantNode{ - grants: []*Grant{}, - children: map[string]*grantNode{}, - } -} - -// NewMemoryGraph returns a new in-memory trust graph created from -// a static list of grants. This graph is immutable after creation -// and any alterations should create a new instance. -func NewMemoryGraph(grants []*Grant) TrustGraph { - roots := map[string]*grantNode{} - for _, grant := range grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - if part != "" { - node.grants = append(node.grants, grant) - } - nodes = node.children - } - } - return &memoryGraph{roots} -} - -func (g *memoryGraph) getGrants(name string) []*Grant { - nameParts := strings.Split(name, "/") - nodes := g.roots - var node *grantNode - var nodeOk bool - for _, part := range nameParts { - node, nodeOk = nodes[part] - if !nodeOk { - return nil - } - nodes = node.children - } - return node.grants -} - -func isSubName(name, sub string) bool { - if strings.HasPrefix(name, sub) { - if len(name) == len(sub) || name[len(sub)] == '/' { - return true - } - } - return false -} - -type walkFunc func(*Grant, []*Grant) bool - -func foundWalkFunc(*Grant, []*Grant) bool { - return true -} - -func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { - if visited == nil { - visited = map[*Grant]bool{} - } - grants := g.getGrants(start) - subGrants := make([]*Grant, 0, len(grants)) - for _, grant := range grants { - if visited[grant] { - continue - } - visited[grant] = true - if grant.Permission&permission == permission { - if isSubName(target, grant.Subject) { - if f(grant, chain) { - return true - } - } else { - subGrants = append(subGrants, grant) - } - } - } - for _, grant := range subGrants { - var chainCopy []*Grant - if collect { - chainCopy = make([]*Grant, len(chain)+1) - copy(chainCopy, chain) - chainCopy[len(chainCopy)-1] = grant - } else { - chainCopy = nil - } - - if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { - return true - } - } - return false -} - -func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { - return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil -} - -func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { - grants := [][]*Grant{} - collect := func(grant *Grant, chain []*Grant) bool { - grantChain := make([]*Grant, len(chain)+1) - copy(grantChain, chain) - grantChain[len(grantChain)-1] = grant - grants = append(grants, grantChain) - return false - } - g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) - return grants, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go deleted file mode 100644 index 7a74b553..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go +++ /dev/null @@ -1,227 +0,0 @@ -package trustgraph - -import ( - "crypto/x509" - "encoding/json" - "io" - "io/ioutil" - "sort" - "strings" - "time" - - "github.com/docker/libtrust" -) - -type jsonGrant struct { - Subject string `json:"subject"` - Permission uint16 `json:"permission"` - Grantee string `json:"grantee"` -} - -type jsonRevocation struct { - Subject string `json:"subject"` - Revocation uint16 `json:"revocation"` - Grantee string `json:"grantee"` -} - -type jsonStatement struct { - Revocations []*jsonRevocation `json:"revocations"` - Grants []*jsonGrant `json:"grants"` - Expiration time.Time `json:"expiration"` - IssuedAt time.Time `json:"issuedAt"` -} - -func (g *jsonGrant) Grant(statement *Statement) *Grant { - return &Grant{ - Subject: g.Subject, - Permission: g.Permission, - Grantee: g.Grantee, - statement: statement, - } -} - -// Statement represents a set of grants made from a verifiable -// authority. A statement has an expiration associated with it -// set by the authority. -type Statement struct { - jsonStatement - - signature *libtrust.JSONSignature -} - -// IsExpired returns whether the statement has expired -func (s *Statement) IsExpired() bool { - return s.Expiration.Before(time.Now().Add(-10 * time.Second)) -} - -// Bytes returns an indented json representation of the statement -// in a byte array. This value can be written to a file or stream -// without alteration. -func (s *Statement) Bytes() ([]byte, error) { - return s.signature.PrettySignature("signatures") -} - -// LoadStatement loads and verifies a statement from an input stream. -func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - js, err := libtrust.ParsePrettySignature(b, "signatures") - if err != nil { - return nil, err - } - payload, err := js.Payload() - if err != nil { - return nil, err - } - var statement Statement - err = json.Unmarshal(payload, &statement.jsonStatement) - if err != nil { - return nil, err - } - - if authority == nil { - _, err = js.Verify() - if err != nil { - return nil, err - } - } else { - _, err = js.VerifyChains(authority) - if err != nil { - return nil, err - } - } - statement.signature = js - - return &statement, nil -} - -// CreateStatement creates and signs a statement from a stream of grants -// and revocations in a JSON array.
-func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { - var statement Statement - err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants) - if err != nil { - return nil, err - } - err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations) - if err != nil { - return nil, err - } - statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration) - statement.jsonStatement.IssuedAt = time.Now().UTC() - - b, err := json.MarshalIndent(&statement.jsonStatement, "", " ") - if err != nil { - return nil, err - } - - statement.signature, err = libtrust.NewJSONSignature(b) - if err != nil { - return nil, err - } - err = statement.signature.SignWithChain(key, chain) - if err != nil { - return nil, err - } - - return &statement, nil -} - -type statementList []*Statement - -func (s statementList) Len() int { - return len(s) -} - -func (s statementList) Less(i, j int) bool { - return s[i].IssuedAt.Before(s[j].IssuedAt) -} - -func (s statementList) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// CollapseStatements returns a single list of the valid statements as well as the -// time when the next grant will expire. -func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { - sorted := make(statementList, 0, len(statements)) - for _, statement := range statements { - if useExpired || !statement.IsExpired() { - sorted = append(sorted, statement) - } - } - sort.Sort(sorted) - - var minExpired time.Time - var grantCount int - roots := map[string]*grantNode{} - for i, statement := range sorted { - if statement.Expiration.Before(minExpired) || i == 0 { - minExpired = statement.Expiration - } - for _, grant := range statement.Grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - g := grant.Grant(statement) - grantCount = grantCount + 1 - - for _, part := range parts { - node, nodeOk := nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - node.grants = append(node.grants, g) - nodes = node.children - } - } - - for _, revocation := range statement.Revocations { - parts := strings.Split(revocation.Grantee, "/") - nodes := roots - - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - break - } - nodes = node.children - } - if node != nil { - for _, grant := range node.grants { - if isSubName(grant.Subject, revocation.Subject) { - grant.Permission = grant.Permission &^ revocation.Revocation - } - } - } - } - } - - retGrants := make([]*Grant, 0, grantCount) - for _, rootNodes := range roots { - retGrants = append(retGrants, rootNodes.grants...) - } - - return retGrants, minExpired, nil -} - -// FilterStatements filters the statements to statements including the given grants. 
-func FilterStatements(grants []*Grant) ([]*Statement, error) { - statements := map[*Statement]bool{} - for _, grant := range grants { - if grant.statement != nil { - statements[grant.statement] = true - } - } - retStatements := make([]*Statement, len(statements)) - var i int - for statement := range statements { - retStatements[i] = statement - i++ - } - return retStatements, nil -} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml deleted file mode 100644 index f983b60c..00000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -sudo: false - -go: - - 1.3 - - 1.4 - - 1.5 - - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index 83ab8f59..00000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.3 - - 1.4 - - 1.5 - - tip -install: - - go get golang.org/x/tools/cmd/vet -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md deleted file mode 100644 index b987c9e5..00000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,240 +0,0 @@ -mux -=== -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) - -Package `gorilla/mux` implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts and paths can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. 
They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved by calling `mux.Vars()`: - -```go -vars := mux.Vars(request) -category := vars["category"] -``` - -And this is all you need to know about the basic usage. More advanced options are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.domain.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as the base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name by calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
- Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.domain.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: - -```go -// "http://news.domain.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.domain.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.domain.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - http.ListenAndServe(":8000", r) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE b/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE b/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index 5c97abce..00000000 --- a/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. 
- -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE b/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE deleted file mode 100644 index ca03685b..00000000 --- a/Godeps/_workspace/src/github.com/vbatts/tar-split/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/Godeps/_workspace/src/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/net/PATENTS b/Godeps/_workspace/src/golang.org/x/net/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/Godeps/_workspace/src/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go deleted file mode 100644 index e3170e33..00000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go deleted file mode 100644 index 56bcbadb..00000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
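The two `cancelreq` files here form a build-constraint pair: the file above is tagged `// +build go1.5` and uses the `Request.Cancel` channel added in Go 1.5, while the file below is tagged `// +build !go1.5` and falls back to `Transport.CancelRequest`, so exactly one `canceler` implementation is compiled for a given toolchain. A minimal sketch of the same pattern in a single runnable file (the `main` package and the print statement are illustrative, not part of this diff):

```go
// +build go1.5

// This file compiles only on Go 1.5 and newer; a sibling file tagged
// "// +build !go1.5" would supply the pre-1.5 fallback implementation.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com", nil)
	if err != nil {
		panic(err)
	}
	// Request.Cancel is the Go 1.5 field the go1.5-tagged canceler relies on:
	// closing this channel cancels the in-flight request.
	req.Cancel = make(chan struct{})
	fmt.Println("built with the go1.5 code path:", req.URL.Host)
}
```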
- -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 19106759..00000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - cancel() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. 
-type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/Makefile b/Makefile index 6808641f..78ca2089 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -export GOPATH:=$(CURDIR)/Godeps/_workspace:$(GOPATH) +export GO15VENDOREXPERIMENT=1 BINDIR=${DESTDIR}/usr/bin/ MANDIR=${DESTDIR}/usr/share/man diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh new file mode 100755 index 00000000..2b080e51 --- /dev/null +++ b/hack/.vendor-helpers.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +PROJECT=github.com/runcom/skopeo + +# Downloads dependencies into vendor/ directory +mkdir -p vendor + +export GOPATH="$GOPATH:${PWD}/vendor" + +find="/usr/bin/find" + +clone() { + local vcs="$1" + local pkg="$2" + local rev="$3" + local url="$4" + + : ${url:=https://$pkg} + local target="vendor/src/$pkg" + + echo -n "$pkg @ $rev: " + + if [ -d "$target" ]; then + echo -n 'rm old, ' + rm -rf "$target" + fi + + echo -n 'clone, ' + case "$vcs" in + git) + git clone --quiet --no-checkout "$url" "$target" + ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" ) + ;; + hg) + hg clone --quiet --updaterev "$rev" "$url" "$target" + ;; + esac + + echo -n 'rm VCS, ' + ( cd "$target" && rm -rf .{git,hg} ) + + echo -n 'rm vendor, ' + ( cd "$target" && rm -rf vendor Godeps/_workspace ) + + echo done +} + +clean() { + local packages=( + "${PROJECT}" # package main + ) + local platforms=( linux/amd64 linux/386 ) + + echo + + echo -n 'collecting import graph, ' + local IFS=$'\n' + local imports=( $( + for platform in "${platforms[@]}"; do + export GOOS="${platform%/*}"; + export GOARCH="${platform##*/}"; + go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" + go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}" + done | grep -vE "^${PROJECT}" | sort -u + ) ) + imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") ) + unset IFS + + echo -n 'pruning unused packages, ' + findArgs=( + # This directory contains only .c and .h files which are necessary + # -path vendor/src/github.com/mattn/go-sqlite3/code + ) + for import in "${imports[@]}"; do + [ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or ) + findArgs+=( -path "vendor/src/$import" ) + done + local IFS=$'\n' + local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) + unset IFS + for dir in "${prune[@]}"; do + $find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';' + rmdir "$dir" 2>/dev/null || true + done + + echo -n 'pruning unused files, ' + $find vendor -type f -name '*_test.go' -exec rm -v '{}' ';' + + echo done +} + +# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring +fix_rewritten_imports () { + local pkg="$1" + local remove="${pkg}/Godeps/_workspace/src/" + local target="vendor/src/$pkg" + + echo "$pkg: fixing rewritten imports" + $find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \; +} diff --git a/hack/vendor.sh b/hack/vendor.sh new file mode 100755 index 00000000..f0a34d05 --- /dev/null +++ b/hack/vendor.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname 
"$BASH_SOURCE")/.." +rm -rf vendor/ +source 'hack/.vendor-helpers.sh' + +clone git github.com/codegangsta/cli c31a7975863e7810c92e2e288a9ab074f9a88f29 +clone git github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe +clone git github.com/Sirupsen/logrus v0.8.7 # logrus is a common dependency among multiple deps +clone git github.com/docker/docker v1.10.0-rc1 +clone git github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +clone git github.com/gorilla/context 14f550f51a +clone git github.com/gorilla/mux e444e69cbd +clone git golang.org/x/net 47990a1ba55743e6ef1affd3a14e5bac8553615d https://github.com/golang/net.git +clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 +clone git github.com/docker/go-connections v0.1.2 +clone git github.com/docker/engine-api v0.2.2 + +# get graph and distribution packages +clone git github.com/docker/distribution 47a064d4195a9b56133891bbb13620c3ac83a827 +clone git github.com/vbatts/tar-split v0.9.11 + +clone git github.com/opencontainers/runc 47e3f834d73e76bc2a6a585b48d2a93325b34979 # libcontainer + +clean + +mv vendor/src/* vendor/ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE rename to vendor/github.com/Azure/go-ansiterm/LICENSE diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore rename to vendor/github.com/Sirupsen/logrus/.gitignore diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml similarity index 89% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml rename to vendor/github.com/Sirupsen/logrus/.travis.yml index ec641142..2d8c0866 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -1,5 +1,6 @@ language: go go: + - 1.2 - 1.3 - 1.4 - tip diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md similarity index 76% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md rename to vendor/github.com/Sirupsen/logrus/CHANGELOG.md index ecc84327..78f98959 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,11 +1,3 @@ -# 0.9.0 (Unreleased) - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository - # 0.8.7 * logrus/core: fix possible race (#216) diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE rename to vendor/github.com/Sirupsen/logrus/LICENSE diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md similarity index 83% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md rename to vendor/github.com/Sirupsen/logrus/README.md index 55d3a8d5..6fa6e206 
100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -75,12 +75,17 @@ package main import ( "os" log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -177,16 +182,13 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in ```go import ( log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + "github.com/Sirupsen/logrus/hooks/airbrake" logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" "log/syslog" ) func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { @@ -196,21 +198,20 @@ func init() { } } ``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + | Hook | Description | | ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | | [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. 
| | [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | | [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | | [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | | [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | | [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | | [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | | [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | @@ -218,9 +219,6 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | | [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | | [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | #### Level logging @@ -298,16 +296,15 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) ``` Third party logging formatters: -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -356,10 +353,5 @@ Log rotation is not provided with Logrus. Log rotation should be done by an external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. 
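The README hunk above ends at the `Formatter` interface: a formatter only has to implement `Format(*Entry) ([]byte, error)`. As a minimal sketch against this vendored logrus (the `kvFormatter` type is a made-up example, not part of the diff):

```go
package main

import (
	"fmt"

	log "github.com/Sirupsen/logrus"
)

// kvFormatter renders one line per entry: the level, the quoted message,
// and each entry.Data field as key=value.
type kvFormatter struct{}

func (f *kvFormatter) Format(entry *log.Entry) ([]byte, error) {
	line := fmt.Sprintf("%s %q", entry.Level.String(), entry.Message)
	for k, v := range entry.Data {
		// Map iteration order is random; a real formatter would sort the keys.
		line += fmt.Sprintf(" %s=%v", k, v)
	}
	return append([]byte(line), '\n'), nil
}

func main() {
	log.SetFormatter(&kvFormatter{})
	log.WithFields(log.Fields{"animal": "walrus"}).Info("A walrus appears")
}
```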
-#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| [godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go rename to vendor/github.com/Sirupsen/logrus/doc.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go rename to vendor/github.com/Sirupsen/logrus/entry.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go rename to vendor/github.com/Sirupsen/logrus/exported.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go rename to vendor/github.com/Sirupsen/logrus/formatter.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go rename to vendor/github.com/Sirupsen/logrus/hooks.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go rename to vendor/github.com/Sirupsen/logrus/json_formatter.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go similarity index 96% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go rename to vendor/github.com/Sirupsen/logrus/logger.go index 2fdb2317..fd9804c6 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -64,12 +64,6 @@ func (logger *Logger) WithFields(fields Fields) *Entry { return NewEntry(logger).WithFields(fields) } -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - return NewEntry(logger).WithError(err) -} - func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugf(format, args...) 
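The logger.go hunk above removes the `WithError` convenience method, since the v0.8.7 snapshot pinned by hack/vendor.sh predates it. Callers get the same structured output by attaching the error as an ordinary field; a minimal sketch, assuming the conventional "error" key that later logrus versions use for `WithError`:

```go
package main

import (
	"errors"

	log "github.com/Sirupsen/logrus"
)

func main() {
	err := errors.New("connection refused")

	// Without logger.WithError(err), attach the error explicitly under the
	// "error" key to produce an equivalent structured entry.
	log.WithFields(log.Fields{"error": err}).Error("request failed")
}
```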
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go rename to vendor/github.com/Sirupsen/logrus/logrus.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go rename to vendor/github.com/Sirupsen/logrus/terminal_bsd.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go rename to vendor/github.com/Sirupsen/logrus/terminal_linux.go diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go similarity index 83% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go rename to vendor/github.com/Sirupsen/logrus/terminal_notwindows.go index b343b3a3..4bb53760 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -12,9 +12,9 @@ import ( "unsafe" ) -// IsTerminal returns true if stderr's file descriptor is a terminal. +// IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal() bool { - fd := syscall.Stderr + fd := syscall.Stdout var termios Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go similarity index 85% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go rename to vendor/github.com/Sirupsen/logrus/terminal_windows.go index 0146845d..2e09f6f7 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -18,9 +18,9 @@ var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) -// IsTerminal returns true if stderr's file descriptor is a terminal. +// IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stderr + fd := syscall.Stdout var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go similarity index 97% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go rename to vendor/github.com/Sirupsen/logrus/text_formatter.go index 06ef2023..17cc2984 100644 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -84,9 +84,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) } f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } + f.appendKeyValue(b, "msg", entry.Message) for _, key := range keys { f.appendKeyValue(b, key, entry.Data[key]) } diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go similarity index 100% rename from Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go rename to vendor/github.com/Sirupsen/logrus/writer.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml b/vendor/github.com/codegangsta/cli/.travis.yml similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml rename to vendor/github.com/codegangsta/cli/.travis.yml diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/vendor/github.com/codegangsta/cli/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE rename to vendor/github.com/codegangsta/cli/LICENSE diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/README.md rename to vendor/github.com/codegangsta/cli/README.md diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/app.go rename to vendor/github.com/codegangsta/cli/app.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/vendor/github.com/codegangsta/cli/cli.go similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/cli.go rename to vendor/github.com/codegangsta/cli/cli.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/command.go rename to vendor/github.com/codegangsta/cli/command.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/context.go rename to vendor/github.com/codegangsta/cli/context.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go similarity index 100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/flag.go rename to vendor/github.com/codegangsta/cli/flag.go diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go similarity index 
100% rename from Godeps/_workspace/src/github.com/codegangsta/cli/help.go rename to vendor/github.com/codegangsta/cli/help.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml b/vendor/github.com/docker/distribution/.drone.yml similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/.drone.yml rename to vendor/github.com/docker/distribution/.drone.yml diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/.gitignore rename to vendor/github.com/docker/distribution/.gitignore diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/.mailmap rename to vendor/github.com/docker/distribution/.mailmap diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/AUTHORS rename to vendor/github.com/docker/distribution/AUTHORS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md rename to vendor/github.com/docker/distribution/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/Dockerfile rename to vendor/github.com/docker/distribution/Dockerfile diff --git a/Godeps/_workspace/src/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/LICENSE rename to vendor/github.com/docker/distribution/LICENSE diff --git a/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS rename to vendor/github.com/docker/distribution/MAINTAINERS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/Makefile rename to vendor/github.com/docker/distribution/Makefile diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/README.md rename to vendor/github.com/docker/distribution/README.md diff --git a/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md rename to vendor/github.com/docker/distribution/ROADMAP.md diff --git a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/blobs.go rename to vendor/github.com/docker/distribution/blobs.go diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/circle.yml rename to vendor/github.com/docker/distribution/circle.yml diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/context.go rename to vendor/github.com/docker/distribution/context/context.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/doc.go rename to vendor/github.com/docker/distribution/context/doc.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/http.go rename to vendor/github.com/docker/distribution/context/http.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/logger.go rename to vendor/github.com/docker/distribution/context/logger.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/trace.go rename to vendor/github.com/docker/distribution/context/trace.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/util.go rename to vendor/github.com/docker/distribution/context/util.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/context/version.go rename to vendor/github.com/docker/distribution/context/version.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh old mode 100644 new mode 100755 similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/coverpkg.sh rename to vendor/github.com/docker/distribution/coverpkg.sh diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go rename to vendor/github.com/docker/distribution/digest/digest.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go rename to vendor/github.com/docker/distribution/digest/digester.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go similarity index 100% rename 
from Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go rename to vendor/github.com/docker/distribution/digest/doc.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/digest/set.go rename to vendor/github.com/docker/distribution/digest/set.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go rename to vendor/github.com/docker/distribution/digest/verifiers.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/doc.go rename to vendor/github.com/docker/distribution/doc.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/errors.go rename to vendor/github.com/docker/distribution/errors.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/doc.go rename to vendor/github.com/docker/distribution/manifest/doc.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go rename to vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go rename to vendor/github.com/docker/distribution/manifest/schema1/config_builder.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go rename to vendor/github.com/docker/distribution/manifest/schema1/manifest.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go rename to vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go rename to vendor/github.com/docker/distribution/manifest/schema1/sign.go diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go rename to vendor/github.com/docker/distribution/manifest/schema1/verify.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go rename to vendor/github.com/docker/distribution/manifest/schema2/builder.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go rename to vendor/github.com/docker/distribution/manifest/schema2/manifest.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go rename to vendor/github.com/docker/distribution/manifest/versioned.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
similarity index 89%
rename from Godeps/_workspace/src/github.com/docker/distribution/manifests.go
rename to vendor/github.com/docker/distribution/manifests.go
index 1acb0500..aec28e97 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/manifests.go
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -2,7 +2,7 @@ package distribution
 
 import (
 	"fmt"
-	"mime"
+	"strings"
 
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
@@ -69,9 +69,7 @@ type Describable interface {
 // ManifestMediaTypes returns the supported media types for manifests.
 func ManifestMediaTypes() (mediaTypes []string) {
 	for t := range mappings {
-		if t != "" {
-			mediaTypes = append(mediaTypes, t)
-		}
+		mediaTypes = append(mediaTypes, t)
 	}
 	return
 }
@@ -84,23 +82,19 @@ var mappings = make(map[string]UnmarshalFunc, 0)
 // UnmarshalManifest looks up manifest unmarshall functions based on
 // MediaType
 func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
-	// Need to look up by the actual media type, not the raw contents of
+	// Need to look up by the actual content type, not the raw contents of
 	// the header. Strip semicolons and anything following them.
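As the comment above says, the vendored UnmarshalManifest (continued below) keeps whatever precedes the first semicolon instead of calling mime.ParseMediaType, and it also stops falling back to a default ("") mapping. A small self-contained comparison of the two parsing behaviours; splitMediaType is a hypothetical stand-in for the inlined logic, not an exported function of this code:

package main

import (
	"fmt"
	"mime"
	"strings"
)

// splitMediaType reproduces the inlined logic: everything before the first
// ';', taken verbatim (no lower-casing, no whitespace trimming, no error).
func splitMediaType(ctHeader string) string {
	if i := strings.Index(ctHeader, ";"); i != -1 {
		return ctHeader[:i]
	}
	return ctHeader
}

func main() {
	ct := "application/vnd.docker.distribution.manifest.v2+json; charset=utf-8"

	fmt.Printf("manual split: %q\n", splitMediaType(ct))

	// mime.ParseMediaType additionally lower-cases the type, parses the
	// parameters, and reports malformed headers as errors, which the
	// manual split never does.
	mt, params, err := mime.ParseMediaType(ct)
	fmt.Printf("mime package: %q %v %v\n", mt, params, err)
}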
 	var mediatype string
-	if ctHeader != "" {
-		var err error
-		mediatype, _, err = mime.ParseMediaType(ctHeader)
-		if err != nil {
-			return nil, Descriptor{}, err
-		}
+	semicolonIndex := strings.Index(ctHeader, ";")
+	if semicolonIndex != -1 {
+		mediatype = ctHeader[:semicolonIndex]
+	} else {
+		mediatype = ctHeader
 	}
 
 	unmarshalFunc, ok := mappings[mediatype]
 	if !ok {
-		unmarshalFunc, ok = mappings[""]
-		if !ok {
-			return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
-		}
+		return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype)
 	}
 
 	return unmarshalFunc(p)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go rename to vendor/github.com/docker/distribution/reference/reference.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go rename to vendor/github.com/docker/distribution/reference/regexp.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry.go rename to vendor/github.com/docker/distribution/registry.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
similarity index 96%
rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go
rename to vendor/github.com/docker/distribution/registry/api/errcode/errors.go
index 6d9bb4b6..9a405d21 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -69,15 +69,6 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error {
 	return nil
 }
 
-// WithMessage creates a new Error struct based on the passed-in info and
-// overrides the Message property.
-func (ec ErrorCode) WithMessage(message string) Error {
-	return Error{
-		Code:    ec,
-		Message: message,
-	}
-}
-
 // WithDetail creates a new Error struct based on the passed-in info and
 // set the Detail property appropriately
 func (ec ErrorCode) WithDetail(detail interface{}) Error {
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go rename to vendor/github.com/docker/distribution/registry/api/errcode/handler.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go rename to vendor/github.com/docker/distribution/registry/api/errcode/register.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go rename to vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go rename to vendor/github.com/docker/distribution/registry/api/v2/doc.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go rename to vendor/github.com/docker/distribution/registry/api/v2/errors.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go rename to vendor/github.com/docker/distribution/registry/api/v2/routes.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go rename to vendor/github.com/docker/distribution/registry/api/v2/urls.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go rename to vendor/github.com/docker/distribution/registry/client/auth/api_version.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go rename to vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go
diff --git
a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go rename to vendor/github.com/docker/distribution/registry/client/auth/session.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go rename to vendor/github.com/docker/distribution/registry/client/blob_writer.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
similarity index 74%
rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go
rename to vendor/github.com/docker/distribution/registry/client/errors.go
index a528a865..8e3cb108 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -31,26 +31,13 @@ func (e *UnexpectedHTTPResponseError) Error() string {
 	return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response))
 }
 
-func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+func parseHTTPErrorResponse(r io.Reader) error {
 	var errors errcode.Errors
 	body, err := ioutil.ReadAll(r)
 	if err != nil {
 		return err
 	}
 
-	// For backward compatibility, handle irregularly formatted
-	// messages that contain a "details" field.
-	var detailsErr struct {
-		Details string `json:"details"`
-	}
-	err = json.Unmarshal(body, &detailsErr)
-	if err == nil && detailsErr.Details != "" {
-		if statusCode == http.StatusUnauthorized {
-			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
-		}
-		return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
-	}
-
 	if err := json.Unmarshal(body, &errors); err != nil {
 		return &UnexpectedHTTPResponseError{
 			ParseErr: err,
@@ -66,14 +53,14 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
 // range.
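With the backward-compatibility branch removed above, parseHTTPErrorResponse has a single decode path: the body either unmarshals as errcode.Errors or is wrapped in an UnexpectedHTTPResponseError. A rough sketch of that surviving flow, using simplified stand-in types (jsonError, jsonErrors, parseBody) rather than the real errcode package:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonError and jsonErrors are simplified stand-ins for errcode.Error and
// errcode.Errors; the real types carry typed, registered error codes.
type jsonError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

type jsonErrors struct {
	Errors []jsonError `json:"errors"`
}

// parseBody mirrors the remaining decode path: a JSON error list or bust.
func parseBody(body []byte) error {
	var errs jsonErrors
	if err := json.Unmarshal(body, &errs); err != nil {
		// The vendored code wraps this case in *UnexpectedHTTPResponseError.
		return fmt.Errorf("error parsing HTTP response: %v: %q", err, body)
	}
	return fmt.Errorf("registry returned %d error(s): %+v", len(errs.Errors), errs.Errors)
}

func main() {
	fmt.Println(parseBody([]byte(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`)))
	// A legacy {"details": "..."} body is no longer special-cased; it now
	// simply decodes to an empty error list.
	fmt.Println(parseBody([]byte(`{"details":"access denied"}`)))
}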
 func HandleErrorResponse(resp *http.Response) error {
 	if resp.StatusCode == 401 {
-		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+		err := parseHTTPErrorResponse(resp.Body)
 		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
 		}
 		return err
 	}
 	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
-		return parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+		return parseHTTPErrorResponse(resp.Body)
 	}
 	return &UnexpectedHTTPStatusError{Status: resp.Status}
 }
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go rename to vendor/github.com/docker/distribution/registry/client/repository.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go rename to vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go rename to vendor/github.com/docker/distribution/registry/client/transport/transport.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go rename to vendor/github.com/docker/distribution/registry/storage/cache/cache.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go rename to vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go rename to vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/tags.go rename to vendor/github.com/docker/distribution/tags.go
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go rename to vendor/github.com/docker/distribution/uuid/uuid.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/LICENSE
b/vendor/github.com/docker/docker/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/LICENSE rename to vendor/github.com/docker/docker/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/api/README.md rename to vendor/github.com/docker/docker/api/README.md
diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
similarity index 99%
rename from Godeps/_workspace/src/github.com/docker/docker/api/common.go
rename to vendor/github.com/docker/docker/api/common.go
index 51be1e27..96064a83 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -18,7 +18,7 @@ import (
 // Common constants for daemon and client.
 const (
 	// Version of Current REST API
-	DefaultVersion version.Version = "1.23"
+	DefaultVersion version.Version = "1.22"
 
 	// MinVersion represents Minimum REST API version supported
 	MinVersion version.Version = "1.12"
diff --git a/Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/cliconfig/config.go rename to vendor/github.com/docker/docker/cliconfig/config.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE rename to vendor/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE rename to vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go rename to vendor/github.com/docker/docker/daemon/graphdriver/driver.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go rename to vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_linux.go rename to vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go rename to
vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver_windows.go rename to vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go rename to vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin.go rename to vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go rename to vendor/github.com/docker/docker/daemon/graphdriver/plugin_unsupported.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/proxy.go rename to vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/metadata.go rename to vendor/github.com/docker/docker/distribution/metadata/metadata.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v1_id_service.go rename to vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/metadata/v2_metadata_service.go rename to vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go
similarity index 90%
rename from Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go
rename to vendor/github.com/docker/docker/distribution/pull.go
index ab8c14ce..5f38a676 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull.go
+++ b/vendor/github.com/docker/docker/distribution/pull.go
@@ -3,6 +3,7 @@ package distribution
 import (
 	"fmt"
 	"os"
+	"strings"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
@@ -96,12 +97,13 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 
 	var (
-		lastErr error
+		// use a slice to append the error strings and return a joined string to caller
+		errors []string
 
 		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport
-		// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr.
+		// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors.
 		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
-		// any subsequent ErrNoSupport errors in lastErr.
+		// any subsequent ErrNoSupport errors in errors.
 		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
 		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
 		// error is the ones from v2 endpoints not v1.
 		discardNoSupportErrors bool
@@ -121,7 +123,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 
 		puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
 		if err != nil {
-			lastErr = err
+			errors = append(errors, err.Error())
 			continue
 		}
 		if err := puller.Pull(ctx, ref); err != nil {
@@ -142,28 +144,34 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 				// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
 				discardNoSupportErrors = true
 				// append subsequent errors
-				lastErr = err
+				errors = append(errors, err.Error())
 			} else if !discardNoSupportErrors {
 				// Save the ErrNoSupport error, because it's either the first error or all encountered errors
 				// were also ErrNoSupport errors.
 				// append subsequent errors
-				lastErr = err
+				errors = append(errors, err.Error())
 			}
 			continue
 		}
 
-		logrus.Debugf("Not continuing with error: %v", err)
-		return err
+		errors = append(errors, err.Error())
+		logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n")))
+		if len(errors) > 0 {
+			return fmt.Errorf(strings.Join(errors, "\n"))
+		}
 	}
 
 		imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull")
 		return nil
 	}
 
-	if lastErr == nil {
-		lastErr = fmt.Errorf("no endpoints found for %s", ref.String())
+	if len(errors) == 0 {
+		return fmt.Errorf("no endpoints found for %s", ref.String())
 	}
 
-	return lastErr
+	if len(errors) > 0 {
+		return fmt.Errorf(strings.Join(errors, "\n"))
+	}
+	return nil
 }
 
 // writeStatus writes a status message to out. If layersDownloaded is true, the
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v1.go rename to vendor/github.com/docker/docker/distribution/pull_v1.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2.go rename to vendor/github.com/docker/docker/distribution/pull_v2.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_unix.go rename to vendor/github.com/docker/docker/distribution/pull_v2_unix.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/pull_v2_windows.go rename to vendor/github.com/docker/docker/distribution/pull_v2_windows.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/push.go rename to vendor/github.com/docker/docker/distribution/push.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/push_v1.go rename to vendor/github.com/docker/docker/distribution/push_v1.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/push_v2.go rename to vendor/github.com/docker/docker/distribution/push_v2.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go
similarity index 96%
rename from Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go
rename to vendor/github.com/docker/docker/distribution/registry.go
index 1c2b4f3c..1d4a2c4e 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/distribution/registry.go
+++ b/vendor/github.com/docker/docker/distribution/registry.go
@@ -6,7 +6,6 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
-	"syscall"
 	"time"
 
 	"github.com/docker/distribution"
@@ -146,14 +145,8 @@ func retryOnError(err error) error {
 		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
 			return xfer.DoNotRetry{Err: err}
 		}
-	case *url.Error:
-		return retryOnError(v.Err)
 	case *client.UnexpectedHTTPResponseError:
 		return xfer.DoNotRetry{Err: err}
-	case error:
-		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
-			return xfer.DoNotRetry{Err: err}
-		}
 	}
 	// let's be nice and fallback if the error is a completely
 	// unexpected one.
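The pull.go hunk above replaces the single lastErr with an errors slice, so every failing endpoint contributes to the final message instead of only the last one. A compact sketch of that aggregation pattern; tryEndpoints and pull are hypothetical stand-ins, and the sketch passes the joined string through a %s verb rather than using it directly as a format string the way fmt.Errorf(strings.Join(errors, "\n")) does in the vendored code:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// tryEndpoints attempts each endpoint in order, collecting every failure;
// the first success discards the accumulated errors.
func tryEndpoints(endpoints []string, pull func(string) error) error {
	var msgs []string
	for _, ep := range endpoints {
		if err := pull(ep); err != nil {
			msgs = append(msgs, err.Error())
			continue
		}
		return nil // first success wins
	}
	if len(msgs) == 0 {
		return errors.New("no endpoints found")
	}
	return fmt.Errorf("%s", strings.Join(msgs, "\n"))
}

func main() {
	err := tryEndpoints([]string{"v2-endpoint", "v1-endpoint"}, func(ep string) error {
		return fmt.Errorf("%s: connection refused", ep)
	})
	fmt.Println(err) // both failures appear, one per line
}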
diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/download.go rename to vendor/github.com/docker/docker/distribution/xfer/download.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/transfer.go rename to vendor/github.com/docker/docker/distribution/xfer/transfer.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/distribution/xfer/upload.go rename to vendor/github.com/docker/docker/distribution/xfer/upload.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/dockerversion/version_lib.go rename to vendor/github.com/docker/docker/dockerversion/version_lib.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/fs.go rename to vendor/github.com/docker/docker/image/fs.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/image.go rename to vendor/github.com/docker/docker/image/image.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/rootfs.go rename to vendor/github.com/docker/docker/image/rootfs.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_unix.go b/vendor/github.com/docker/docker/image/rootfs_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/rootfs_unix.go rename to vendor/github.com/docker/docker/image/rootfs_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/rootfs_windows.go b/vendor/github.com/docker/docker/image/rootfs_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/rootfs_windows.go rename to vendor/github.com/docker/docker/image/rootfs_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/store.go rename to vendor/github.com/docker/docker/image/store.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/image/v1/imagev1.go rename to vendor/github.com/docker/docker/image/v1/imagev1.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go similarity index 100% rename from 
Godeps/_workspace/src/github.com/docker/docker/layer/empty.go rename to vendor/github.com/docker/docker/layer/empty.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go
similarity index 90%
rename from Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go
rename to vendor/github.com/docker/docker/layer/filestore.go
index a0044b36..236c9ba5 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/layer/filestore.go
+++ b/vendor/github.com/docker/docker/layer/filestore.go
@@ -10,7 +10,6 @@ import (
 	"path/filepath"
 	"regexp"
 	"strconv"
-	"strings"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
@@ -155,7 +154,7 @@ func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
 		return "", err
 	}
 
-	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	dgst, err := digest.ParseDigest(string(content))
 	if err != nil {
 		return "", err
 	}
@@ -169,7 +168,7 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
 		return "", err
 	}
 
-	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	dgst, err := digest.ParseDigest(string(content))
 	if err != nil {
 		return "", err
 	}
@@ -178,17 +177,16 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
 }
 
 func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
-	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
+	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
 	if err != nil {
 		return "", err
 	}
 
-	content := strings.TrimSpace(string(contentBytes))
-	if !stringIDRegexp.MatchString(content) {
+	if !stringIDRegexp.MatchString(string(content)) {
 		return "", errors.New("invalid cache id value")
 	}
 
-	return content, nil
+	return string(content), nil
 }
 
 func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
@@ -229,34 +227,32 @@ func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error
 }
 
 func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
-	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
+	content, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
 	if err != nil {
 		return "", err
 	}
 
-	content := strings.TrimSpace(string(contentBytes))
-	if !stringIDRegexp.MatchString(content) {
+	if !stringIDRegexp.MatchString(string(content)) {
 		return "", errors.New("invalid mount id value")
 	}
 
-	return content, nil
+	return string(content), nil
 }
 
 func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
-	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
+	content, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
 	if err != nil {
 		if os.IsNotExist(err) {
 			return "", nil
 		}
 		return "", err
 	}
 
-	content := strings.TrimSpace(string(contentBytes))
-	if !stringIDRegexp.MatchString(content) {
+	if !stringIDRegexp.MatchString(string(content)) {
 		return "", errors.New("invalid init id value")
 	}
 
-	return content, nil
+	return string(content), nil
 }
 
 func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
@@ -268,7 +264,7 @@ func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
 		return "", err
 	}
 
-	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	dgst, err := digest.ParseDigest(string(content))
 	if err != nil {
 		return "", err
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer.go
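The filestore.go hunk above drops strings.TrimSpace around the metadata file contents before they reach digest.ParseDigest and the stringIDRegexp checks, so a file written back with a trailing newline would now fail validation unless writers are byte-exact. An illustration of what the trimming guarded against, using a simplified regexp validator as a stand-in for digest.ParseDigest:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// digestRe is a deliberately loose stand-in for the real digest grammar:
// algorithm, a colon, then hex. Note the anchors: a trailing '\n' fails.
var digestRe = regexp.MustCompile(`^[a-z0-9]+:[a-f0-9]+$`)

func parseDigest(s string) (string, error) {
	if !digestRe.MatchString(s) {
		return "", fmt.Errorf("invalid digest format: %q", s)
	}
	return s, nil
}

func main() {
	raw := "sha256:0123456789abcdef\n" // as it might be read back from disk

	if _, err := parseDigest(raw); err != nil {
		fmt.Println("without TrimSpace:", err)
	}
	if d, err := parseDigest(strings.TrimSpace(raw)); err == nil {
		fmt.Println("with TrimSpace:   ", d)
	}
}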
b/vendor/github.com/docker/docker/layer/layer.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/layer.go rename to vendor/github.com/docker/docker/layer/layer.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go
similarity index 97%
rename from Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go
rename to vendor/github.com/docker/docker/layer/layer_store.go
index 619c1a30..495841f4 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_store.go
+++ b/vendor/github.com/docker/docker/layer/layer_store.go
@@ -86,7 +86,6 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (St
 		l, err := ls.loadLayer(id)
 		if err != nil {
 			logrus.Debugf("Failed to load layer %s: %s", id, err)
-			continue
 		}
 		if l.parent != nil {
 			l.parent.referenceCount++
@@ -110,22 +109,22 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
 
 	diff, err := ls.store.GetDiffID(layer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
+		return nil, err
 	}
 
 	size, err := ls.store.GetSize(layer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
+		return nil, err
 	}
 
 	cacheID, err := ls.store.GetCacheID(layer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
+		return nil, err
 	}
 
 	parent, err := ls.store.GetParent(layer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
+		return nil, err
 	}
 
 	cl = &roLayer{
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/layer_unix.go rename to vendor/github.com/docker/docker/layer/layer_unix.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/layer_windows.go rename to vendor/github.com/docker/docker/layer/layer_windows.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/migration.go rename to vendor/github.com/docker/docker/layer/migration.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/mounted_layer.go rename to vendor/github.com/docker/docker/layer/mounted_layer.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/layer/ro_layer.go rename to vendor/github.com/docker/docker/layer/ro_layer.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go rename to vendor/github.com/docker/docker/opts/hosts.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go similarity index 100% rename
from Godeps/_workspace/src/github.com/docker/docker/opts/hosts_unix.go rename to vendor/github.com/docker/docker/opts/hosts_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go rename to vendor/github.com/docker/docker/opts/hosts_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/ip.go rename to vendor/github.com/docker/docker/opts/ip.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/opts.go rename to vendor/github.com/docker/docker/opts/opts.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/opts_unix.go rename to vendor/github.com/docker/docker/opts/opts_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go rename to vendor/github.com/docker/docker/opts/opts_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md rename to vendor/github.com/docker/docker/pkg/archive/README.md diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go rename to vendor/github.com/docker/docker/pkg/archive/archive.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go rename to vendor/github.com/docker/docker/pkg/archive/archive_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go rename to vendor/github.com/docker/docker/pkg/archive/archive_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go rename to vendor/github.com/docker/docker/pkg/archive/changes.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_linux.go rename to vendor/github.com/docker/docker/pkg/archive/changes_linux.go diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_other.go rename to vendor/github.com/docker/docker/pkg/archive/changes_other.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_unix.go rename to vendor/github.com/docker/docker/pkg/archive/changes_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_windows.go rename to vendor/github.com/docker/docker/pkg/archive/changes_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy.go rename to vendor/github.com/docker/docker/pkg/archive/copy.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_unix.go rename to vendor/github.com/docker/docker/pkg/archive/copy_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/copy_windows.go rename to vendor/github.com/docker/docker/pkg/archive/copy_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go rename to vendor/github.com/docker/docker/pkg/archive/diff.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go rename to vendor/github.com/docker/docker/pkg/archive/example_changes.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go rename to vendor/github.com/docker/docker/pkg/archive/time_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go rename to vendor/github.com/docker/docker/pkg/archive/time_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/whiteouts.go rename to 
vendor/github.com/docker/docker/pkg/archive/whiteouts.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go rename to vendor/github.com/docker/docker/pkg/archive/wrap.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/archive.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_unix.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/archive_windows.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/diff.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_unix.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/diff_windows.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_unix.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/chrootarchive/init_windows.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go rename to vendor/github.com/docker/docker/pkg/fileutils/fileutils.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go 
b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go rename to vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go rename to vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir.go rename to vendor/github.com/docker/docker/pkg/homedir/homedir.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/httputils.go rename to vendor/github.com/docker/docker/pkg/httputils/httputils.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/mimetype.go rename to vendor/github.com/docker/docker/pkg/httputils/mimetype.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go rename to vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools.go rename to vendor/github.com/docker/docker/pkg/idtools/idtools.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_unix.go rename to vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/idtools_windows.go rename to vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go rename to vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go similarity index 100% rename from 
Godeps/_workspace/src/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go rename to vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go rename to vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt.go rename to vendor/github.com/docker/docker/pkg/ioutils/fmt.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader.go rename to vendor/github.com/docker/docker/pkg/ioutils/multireader.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go rename to vendor/github.com/docker/docker/pkg/ioutils/readers.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go b/vendor/github.com/docker/docker/pkg/ioutils/scheduler.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go rename to vendor/github.com/docker/docker/pkg/ioutils/scheduler.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go rename to vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_unix.go rename to vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/temp_windows.go rename to vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go rename to vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go rename to vendor/github.com/docker/docker/pkg/ioutils/writers.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go similarity index 100% rename from 
Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog.go
rename to vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
rename to vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
rename to vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
rename to vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
rename to vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/vendor/github.com/docker/docker/pkg/mflag/LICENSE
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
rename to vendor/github.com/docker/docker/pkg/mflag/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md b/vendor/github.com/docker/docker/pkg/mflag/README.md
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md
rename to vendor/github.com/docker/docker/pkg/mflag/README.md
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go b/vendor/github.com/docker/docker/pkg/mflag/flag.go
similarity index 99%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
rename to vendor/github.com/docker/docker/pkg/mflag/flag.go
index 04683299..2ad299ac 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
+++ b/vendor/github.com/docker/docker/pkg/mflag/flag.go
@@ -1163,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) {
 			str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
 		}
 	}
-	fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
+	fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
 }
 
 // Parsed reports whether fs.Parse has been called.
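The only hunk in the vendored mflag copy above changes `ReportError` to print a fixed `docker:` prefix instead of `os.Args[0]`, which can be an absolute path depending on how the binary was invoked. A minimal sketch of the two behaviors, outside the vendored package (the helper names here are illustrative, not part of mflag):

```go
package main

import (
	"fmt"
	"os"
)

// reportWithArgv0 mimics the old behavior: the prefix is whatever the
// process was invoked as, which may be a full path like /usr/bin/docker.
func reportWithArgv0(msg string) {
	fmt.Fprintf(os.Stderr, "%s: %s.\n", os.Args[0], msg)
}

// reportFixed mimics the vendored behavior: a stable "docker" prefix,
// independent of the invocation path.
func reportFixed(msg string) {
	fmt.Fprintf(os.Stderr, "docker: %s.\n", msg)
}

func main() {
	reportWithArgv0("flag provided but not defined: -x")
	reportFixed("flag provided but not defined: -x")
}
```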
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go rename to vendor/github.com/docker/docker/pkg/pools/pools.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progress.go rename to vendor/github.com/docker/docker/pkg/progress/progress.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/progress/progressreader.go rename to vendor/github.com/docker/docker/pkg/progress/progressreader.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go rename to vendor/github.com/docker/docker/pkg/promise/promise.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/random/random.go rename to vendor/github.com/docker/docker/pkg/random/random.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/README.md rename to vendor/github.com/docker/docker/pkg/reexec/README.md diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go b/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_freebsd.go 
rename to vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_linux.go rename to vendor/github.com/docker/docker/pkg/reexec/command_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_unsupported.go rename to vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/command_windows.go rename to vendor/github.com/docker/docker/pkg/reexec/command_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go rename to vendor/github.com/docker/docker/pkg/reexec/reexec.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/README.md rename to vendor/github.com/docker/docker/pkg/stringid/README.md diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go rename to vendor/github.com/docker/docker/pkg/stringid/stringid.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE rename to vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD rename to vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go rename to vendor/github.com/docker/docker/pkg/system/chtimes.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go rename to vendor/github.com/docker/docker/pkg/system/errors.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go rename to 
vendor/github.com/docker/docker/pkg/system/events_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go rename to vendor/github.com/docker/docker/pkg/system/filesys.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go rename to vendor/github.com/docker/docker/pkg/system/filesys_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go rename to vendor/github.com/docker/docker/pkg/system/lstat.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go rename to vendor/github.com/docker/docker/pkg/system/lstat_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go rename to vendor/github.com/docker/docker/pkg/system/meminfo.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go rename to vendor/github.com/docker/docker/pkg/system/meminfo_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go rename to vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go rename to vendor/github.com/docker/docker/pkg/system/meminfo_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go rename to vendor/github.com/docker/docker/pkg/system/mknod.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go rename to vendor/github.com/docker/docker/pkg/system/mknod_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_unix.go rename to 
vendor/github.com/docker/docker/pkg/system/path_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/path_windows.go rename to vendor/github.com/docker/docker/pkg/system/path_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go rename to vendor/github.com/docker/docker/pkg/system/stat.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go rename to vendor/github.com/docker/docker/pkg/system/stat_freebsd.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go rename to vendor/github.com/docker/docker/pkg/system/stat_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_solaris.go rename to vendor/github.com/docker/docker/pkg/system/stat_solaris.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go rename to vendor/github.com/docker/docker/pkg/system/stat_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go rename to vendor/github.com/docker/docker/pkg/system/stat_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_unix.go rename to vendor/github.com/docker/docker/pkg/system/syscall_unix.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go rename to vendor/github.com/docker/docker/pkg/system/syscall_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go rename to vendor/github.com/docker/docker/pkg/system/umask.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go rename to 
vendor/github.com/docker/docker/pkg/system/umask_windows.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go rename to vendor/github.com/docker/docker/pkg/system/utimes_darwin.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go rename to vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go rename to vendor/github.com/docker/docker/pkg/system/utimes_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go rename to vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go rename to vendor/github.com/docker/docker/pkg/system/xattrs_linux.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go rename to vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go rename to vendor/github.com/docker/docker/pkg/tarsum/builder_context.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go rename to vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go rename to vendor/github.com/docker/docker/pkg/tarsum/tarsum.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md rename to vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go similarity index 100% rename from 
Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go
rename to vendor/github.com/docker/docker/pkg/tarsum/versioning.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go
rename to vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/ascii.go
rename to vendor/github.com/docker/docker/pkg/term/ascii.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go
rename to vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go
rename to vendor/github.com/docker/docker/pkg/term/tc_other.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go
rename to vendor/github.com/docker/docker/pkg/term/term.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
similarity index 77%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go
rename to vendor/github.com/docker/docker/pkg/term/term_windows.go
index 798bc719..04870d1b 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -3,12 +3,14 @@
 package term
 
 import (
+	"fmt"
 	"io"
 	"os"
 	"os/signal"
 	"syscall"
 
 	"github.com/Azure/go-ansiterm/winterm"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term/windows"
 )
@@ -121,6 +123,52 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
 	return winsize, nil
 }
 
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+
+	// Ensure the requested dimensions are no larger than the maximum window size
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return err
+	}
+
+	if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) {
+		return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)",
+			ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y)
+	}
+
+	// Narrow the sizes to that used by Windows
+	width := winterm.SHORT(ws.Width)
+	height := winterm.SHORT(ws.Height)
+
+	// Set the dimensions while ensuring they remain within the bounds of the backing console buffer
+	// -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs,
+	// shift the upper left just enough to keep the new window within the buffer.
+	rect := info.Window
+	if width < rect.Right-rect.Left+1 {
+		rect.Right = rect.Left + width - 1
+	} else if width > rect.Right-rect.Left+1 {
+		rect.Right = rect.Left + width - 1
+		if rect.Right >= info.Size.X {
+			rect.Left = info.Size.X - width
+			rect.Right = info.Size.X - 1
+		}
+	}
+
+	if height < rect.Bottom-rect.Top+1 {
+		rect.Bottom = rect.Top + height - 1
+	} else if height > rect.Bottom-rect.Top+1 {
+		rect.Bottom = rect.Top + height - 1
+		if rect.Bottom >= info.Size.Y {
+			rect.Top = info.Size.Y - height
+			rect.Bottom = info.Size.Y - 1
+		}
+	}
+	logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect)
+
+	return winterm.SetConsoleWindowInfo(fd, true, rect)
+}
+
 // IsTerminal returns true if the given file descriptor is a terminal.
 func IsTerminal(fd uintptr) bool {
 	return windows.IsConsole(fd)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go
rename to vendor/github.com/docker/docker/pkg/term/termios_darwin.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go
rename to vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go
rename to vendor/github.com/docker/docker/pkg/term/termios_linux.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/README.md
rename to vendor/github.com/docker/docker/pkg/useragent/README.md
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/useragent/useragent.go
rename to vendor/github.com/docker/docker/pkg/useragent/useragent.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go b/vendor/github.com/docker/docker/pkg/version/version.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go
rename to vendor/github.com/docker/docker/pkg/version/version.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/reference/reference.go
rename to vendor/github.com/docker/docker/reference/reference.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/reference/store.go
rename to vendor/github.com/docker/docker/reference/store.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/auth.go
rename to vendor/github.com/docker/docker/registry/auth.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go b/vendor/github.com/docker/docker/registry/authchallenge.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/authchallenge.go
rename to vendor/github.com/docker/docker/registry/authchallenge.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/config.go
rename to vendor/github.com/docker/docker/registry/config.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/config_unix.go
rename to vendor/github.com/docker/docker/registry/config_unix.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/config_windows.go
rename to vendor/github.com/docker/docker/registry/config_windows.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go b/vendor/github.com/docker/docker/registry/endpoint.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go
rename to vendor/github.com/docker/docker/registry/endpoint.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/reference.go b/vendor/github.com/docker/docker/registry/reference.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/reference.go
rename to vendor/github.com/docker/docker/registry/reference.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go
similarity index 97%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/registry.go
rename to vendor/github.com/docker/docker/registry/registry.go
index bacc4aed..643fa56e 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go
+++ b/vendor/github.com/docker/docker/registry/registry.go
@@ -109,7 +109,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
 			keyName := certName[:len(certName)-5] + ".key"
 			logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
 			if !hasFile(fs, keyName) {
-				return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName)
+				return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
 			}
 			cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
 			if err != nil {
@@ -122,7 +122,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
 			certName := keyName[:len(keyName)-4] + ".cert"
 			logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
 			if !hasFile(fs, certName) {
-				return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName)
+				return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
 			}
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/service.go
rename to vendor/github.com/docker/docker/registry/service.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/service_v1.go
rename to vendor/github.com/docker/docker/registry/service_v1.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/service_v2.go
rename to vendor/github.com/docker/docker/registry/service_v2.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/session.go
rename to vendor/github.com/docker/docker/registry/session.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/token.go b/vendor/github.com/docker/docker/registry/token.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/token.go
rename to vendor/github.com/docker/docker/registry/token.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/registry/types.go
rename to vendor/github.com/docker/docker/registry/types.go
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/engine-api/LICENSE
rename to vendor/github.com/docker/engine-api/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go
rename to vendor/github.com/docker/engine-api/types/auth.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go
rename to vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go
similarity index 98%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/client.go
rename to vendor/github.com/docker/engine-api/types/client.go
index 81eb29c9..16c1cb10 100644
--- a/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go
+++ b/vendor/github.com/docker/engine-api/types/client.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/docker/engine-api/types/container"
 	"github.com/docker/engine-api/types/filters"
-	"github.com/docker/engine-api/types/image"
 	"github.com/docker/go-units"
 )
 
@@ -127,7 +126,7 @@ type ImageBuildOptions struct {
 	NoCache        bool
 	Remove         bool
 	ForceRemove    bool
-	PullParent     image.PullBehavior
+	PullParent     bool
 	IsolationLevel container.IsolationLevel
 	CPUSetCPUs     string
 	CPUSetMems     string
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go
rename to vendor/github.com/docker/engine-api/types/configs.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go
rename to vendor/github.com/docker/engine-api/types/container/config.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go
similarity index 97%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go
rename to vendor/github.com/docker/engine-api/types/container/host_config.go
index d2b7c0c5..f43263d6 100644
--- a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go
+++ b/vendor/github.com/docker/engine-api/types/container/host_config.go
@@ -151,11 +151,6 @@ func (rp *RestartPolicy) IsUnlessStopped() bool {
 	return rp.Name == "unless-stopped"
 }
 
-// IsSame compares two RestartPolicy to see if they are the same
-func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
-	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
-}
-
 // LogConfig represents the logging configuration of the container.
 type LogConfig struct {
 	Type   string
@@ -195,7 +190,6 @@ type Resources struct {
 type UpdateConfig struct {
 	// Contains container's resources (cgroups, ulimits)
 	Resources
-	RestartPolicy RestartPolicy
 }
 
 // HostConfig the non-portable Config structure of a container.
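The host_config.go hunk above deletes `RestartPolicy.IsSame` along with the `RestartPolicy` field of `UpdateConfig`. For callers that still need the comparison, it is plain field-wise equality; a minimal sketch, with `RestartPolicy` mirroring only the two fields the removed method touched:

```go
package main

import "fmt"

// RestartPolicy mirrors the engine-api struct as far as the removed
// method used it; the real type lives in types/container.
type RestartPolicy struct {
	Name              string
	MaximumRetryCount int
}

// samePolicy reproduces what the removed IsSame method computed:
// equality of both the policy name and the retry bound.
func samePolicy(a, b RestartPolicy) bool {
	return a.Name == b.Name && a.MaximumRetryCount == b.MaximumRetryCount
}

func main() {
	p := RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
	q := RestartPolicy{Name: "on-failure", MaximumRetryCount: 5}
	fmt.Println(samePolicy(p, q)) // false: retry counts differ
}
```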
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
rename to vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
rename to vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go
rename to vendor/github.com/docker/engine-api/types/filters/parse.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go
rename to vendor/github.com/docker/engine-api/types/network/network.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go
rename to vendor/github.com/docker/engine-api/types/registry/registry.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go
rename to vendor/github.com/docker/engine-api/types/seccomp.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go
rename to vendor/github.com/docker/engine-api/types/stats.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go
rename to vendor/github.com/docker/engine-api/types/strslice/strslice.go
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go
similarity index 99%
rename from Godeps/_workspace/src/github.com/docker/engine-api/types/types.go
rename to vendor/github.com/docker/engine-api/types/types.go
index b573ff45..9666ea45 100644
--- a/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go
+++ b/vendor/github.com/docker/engine-api/types/types.go
@@ -142,7 +142,6 @@ type Container struct {
 	SizeRw     int64 `json:",omitempty"`
 	SizeRootFs int64 `json:",omitempty"`
 	Labels     map[string]string
-	State      string
 	Status     string
 	HostConfig struct {
 		NetworkMode string `json:",omitempty"`
@@ -199,7 +198,6 @@ type Info struct {
 	Images       int
 	Driver       string
 	DriverStatus [][2]string
-	SystemStatus [][2]string
 	Plugins      PluginsInfo
 	MemoryLimit  bool
 	SwapLimit    bool
@@ -390,7 +388,6 @@ type NetworkResource struct {
 	Scope      string
 	Driver     string
 	IPAM       network.IPAM
-	Internal   bool
 	Containers map[string]EndpointResource
 	Options    map[string]string
 }
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-connections/LICENSE
rename to vendor/github.com/docker/go-connections/LICENSE
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go
rename to vendor/github.com/docker/go-connections/nat/nat.go
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go
rename to vendor/github.com/docker/go-connections/nat/parse.go
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go
rename to vendor/github.com/docker/go-connections/nat/sort.go
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
similarity index 95%
rename from Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go
rename to vendor/github.com/docker/go-connections/tlsconfig/config.go
index c08e53a0..e3dfad1f 100644
--- a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -41,6 +41,12 @@ var acceptedCBCCiphers = []uint16{
 	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
 }
 
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
+
 // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls
 // options struct but wants to use a commonly accepted set of TLS cipher suites, with
 // known weak algorithms removed.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/go-units/LICENSE
rename to vendor/github.com/docker/go-units/LICENSE
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 00000000..e2fb4051
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,13 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## License
+
+go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
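The tlsconfig hunk above introduces `clientCipherSuites`, a client-side preference list that drops the CBC fallbacks kept in `acceptedCBCCiphers`: a client controls its own preferred suites and need not retain CBC modes for old peers. A hedged sketch of how such a list plugs into a `crypto/tls` client config (the surrounding code is illustrative, not the go-connections API):

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// Mirrors the clientCipherSuites list from the hunk: ECDHE key exchange
// with AES-GCM only, no CBC modes.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

func main() {
	// A restrictive client config built from the list; MinVersion is an
	// added assumption here, not something the hunk sets.
	cfg := &tls.Config{
		MinVersion:   tls.VersionTLS12,
		CipherSuites: clientCipherSuites,
	}
	fmt.Printf("client config allows %d cipher suites\n", len(cfg.CipherSuites))
}
```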
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-units/circle.yml
rename to vendor/github.com/docker/go-units/circle.yml
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-units/duration.go
rename to vendor/github.com/docker/go-units/duration.go
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/docker/go-units/size.go
rename to vendor/github.com/docker/go-units/size.go
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
similarity index 81%
rename from Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
rename to vendor/github.com/docker/go-units/ulimit.go
index 5ac7fd82..f0a7be29 100644
--- a/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -73,34 +73,25 @@ func ParseUlimit(val string) (*Ulimit, error) {
 		return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
 	}
 
-	var (
-		soft int64
-		hard = &soft // default to soft in case no hard was set
-		temp int64
-		err  error
-	)
-	switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
-	case 2:
-		temp, err = strconv.ParseInt(limitVals[1], 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		hard = &temp
-		fallthrough
-	case 1:
-		soft, err = strconv.ParseInt(limitVals[0], 10, 64)
-		if err != nil {
-			return nil, err
-		}
-	default:
+	limitVals := strings.SplitN(parts[1], ":", 2)
+	if len(limitVals) > 2 {
 		return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
 	}
 
-	if soft > *hard {
-		return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+	soft, err := strconv.ParseInt(limitVals[0], 10, 64)
+	if err != nil {
+		return nil, err
 	}
 
-	return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+	hard := soft // in case no hard was set
+	if len(limitVals) == 2 {
+		hard, err = strconv.ParseInt(limitVals[1], 10, 64)
+	}
+	if soft > hard {
+		return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
+	}
+
+	return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil
 }
 
 // GetRlimit returns the RLimit corresponding to Ulimit.
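The ulimit.go hunk above replaces the switch/fallthrough parser with a straight-line `soft[:hard]` split. A self-contained sketch of that logic, assuming only the `Ulimit` fields shown in the hunk; unlike the vendored code, it also checks the error from the second `ParseInt` (the hunk assigns it without an explicit check). Note too that with `strings.SplitN(..., 2)` the `len > 2` branch in the hunk can never fire, so an input like `nofile=1:2:3` surfaces as a parse error on `"2:3"` rather than a "too many limit value arguments" error:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Ulimit mirrors the go-units struct as used by the hunk.
type Ulimit struct {
	Name string
	Soft int64
	Hard int64
}

// parseSoftHard parses "soft[:hard]"; when no hard limit is given,
// hard defaults to soft, and soft may never exceed hard.
func parseSoftHard(name, val string) (*Ulimit, error) {
	limitVals := strings.SplitN(val, ":", 2)
	soft, err := strconv.ParseInt(limitVals[0], 10, 64)
	if err != nil {
		return nil, err
	}
	hard := soft // default: hard follows soft when no ":hard" part is given
	if len(limitVals) == 2 {
		if hard, err = strconv.ParseInt(limitVals[1], 10, 64); err != nil {
			return nil, err
		}
	}
	if soft > hard {
		return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
	}
	return &Ulimit{Name: name, Soft: soft, Hard: hard}, nil
}

func main() {
	u, err := parseSoftHard("nofile", "1024:2048")
	fmt.Println(u, err) // &{nofile 1024 2048} <nil>
}
```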
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md rename to vendor/github.com/docker/libtrust/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE rename to vendor/github.com/docker/libtrust/LICENSE diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS rename to vendor/github.com/docker/libtrust/MAINTAINERS diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/README.md rename to vendor/github.com/docker/libtrust/README.md diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/certificates.go rename to vendor/github.com/docker/libtrust/certificates.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/doc.go rename to vendor/github.com/docker/libtrust/doc.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go rename to vendor/github.com/docker/libtrust/ec_key.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/filter.go rename to vendor/github.com/docker/libtrust/filter.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/hash.go rename to vendor/github.com/docker/libtrust/hash.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go rename to vendor/github.com/docker/libtrust/jsonsign.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/key.go rename to vendor/github.com/docker/libtrust/key.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/key_files.go rename to vendor/github.com/docker/libtrust/key_files.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go rename to 
vendor/github.com/docker/libtrust/key_manager.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go rename to vendor/github.com/docker/libtrust/rsa_key.go diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go similarity index 100% rename from Godeps/_workspace/src/github.com/docker/libtrust/util.go rename to vendor/github.com/docker/libtrust/util.go diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml new file mode 100644 index 00000000..d87d4657 --- /dev/null +++ b/vendor/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE rename to vendor/github.com/gorilla/context/LICENSE diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/gorilla/context/README.md rename to vendor/github.com/gorilla/context/README.md diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go similarity index 100% rename from Godeps/_workspace/src/github.com/gorilla/context/context.go rename to vendor/github.com/gorilla/context/context.go diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/gorilla/context/doc.go rename to vendor/github.com/gorilla/context/doc.go diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml new file mode 100644 index 00000000..d87d4657 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE rename to vendor/github.com/gorilla/mux/LICENSE diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 00000000..e60301b0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go similarity index 89% rename from Godeps/_workspace/src/github.com/gorilla/mux/doc.go rename to vendor/github.com/gorilla/mux/doc.go index 49798cb5..b2deed34 100644 --- a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -60,8 +60,8 @@ Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. 
They can also have variables: r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") // Matches a dynamic subdomain. r.Host("{subdomain:[a-z]+}.domain.com") @@ -89,12 +89,12 @@ There are several other matchers that can be added. To match path prefixes: r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { return r.ProtoMajor == 0 - }) + }) ...and finally, it is possible to combine several matchers in a single route: r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). + Host("www.domain.com"). Methods("GET"). Schemes("http") @@ -103,11 +103,11 @@ a way to group several routes that share the same requirements. We call it "subrouting". For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" +host is "www.domain.com". Create a route for that host and get a "subrouter" from it: r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() + s := r.Host("www.domain.com").Subrouter() Then register routes in the subrouter: @@ -116,7 +116,7 @@ Then register routes in the subrouter: s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not +"www.domain.com", because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. @@ -164,21 +164,14 @@ This also works for host variables: // url.String() will be "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - There's also a way to build only the URL host or path for a route: use the methods URLHost() or URLPath() instead. 
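The named-route reversal described above can also be exercised end to end; the doc comment resumes below with its own URL() example. A minimal sketch under the vendored (older) API, where ArticleHandler is a stub:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// ArticleHandler is a stub; only the route registration matters here.
func ArticleHandler(w http.ResponseWriter, r *http.Request) {}

func main() {
	r := mux.NewRouter()
	// A named route combining a host variable with path variables.
	r.Host("{subdomain}.domain.com").
		Path("/articles/{category}/{id:[0-9]+}").
		HandlerFunc(ArticleHandler).
		Name("article")

	// Reversing requires every variable as a key/value pair; in this
	// vendored version the pairs go straight to mapFromPairs.
	url, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "technology",
		"id", "42")
	if err != nil {
		panic(err)
	}
	fmt.Println(url.String()) // http://news.domain.com/articles/technology/42
}
```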
For the previous route, we would do: @@ -200,7 +193,7 @@ as well: // "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") */ package mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go similarity index 68% rename from Godeps/_workspace/src/github.com/gorilla/mux/mux.go rename to vendor/github.com/gorilla/mux/mux.go index aabe9958..5b5f8e7d 100644 --- a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,11 +5,9 @@ package mux import ( - "errors" "fmt" "net/http" "path" - "regexp" "github.com/gorilla/context" ) @@ -59,12 +57,6 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { return true } } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - return true - } return false } @@ -76,7 +68,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { // Clean path to canonical form and redirect. if p := cleanPath(req.URL.Path); p != req.URL.Path { - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: // http://code.google.com/p/go/issues/detail?id=5252 url := *req.URL @@ -95,7 +87,10 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { setCurrentRoute(req, match.Route) } if handler == nil { - handler = http.NotFoundHandler() + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } } if !r.KeepContext { defer context.Clear(req) @@ -157,13 +152,6 @@ func (r *Router) getRegexpGroup() *routeRegexpGroup { return nil } -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- @@ -236,58 +224,6 @@ func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) } -// BuildVars registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. 
-type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { - continue - } - - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - // ---------------------------------------------------------------------------- // Context // ---------------------------------------------------------------------------- @@ -315,10 +251,6 @@ func Vars(r *http.Request) map[string]string { } // CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. func CurrentRoute(r *http.Request) *Route { if rv := context.Get(r, routeKey); rv != nil { return rv.(*Route) @@ -327,15 +259,11 @@ func CurrentRoute(r *http.Request) *Route { } func setVars(r *http.Request, val interface{}) { - if val != nil { - context.Set(r, varsKey, val) - } + context.Set(r, varsKey, val) } func setCurrentRoute(r *http.Request, val interface{}) { - if val != nil { - context.Set(r, routeKey, val) - } + context.Set(r, routeKey, val) } // ---------------------------------------------------------------------------- @@ -372,24 +300,13 @@ func uniqueVars(s1, s2 []string) error { return nil } -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairs(pairs ...string) (map[string]string, error) { length := len(pairs) if length%2 != 0 { - return length, fmt.Errorf( + return nil, fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } m := make(map[string]string, length/2) for i := 0; i < length; i += 2 { m[pairs[i]] = pairs[i+1] @@ -397,24 +314,6 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) { return m, nil } -// mapFromPairsToRegex converts variadic string paramers to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - // matchInArray returns true if the given string value is in the array. 
func matchInArray(arr []string, value string) bool { for _, v := range arr { @@ -425,8 +324,9 @@ func matchInArray(arr []string, value string) bool { return false } -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { for k, v := range toCheck { // Check if key exists. if canonicalKey { @@ -451,31 +351,3 @@ func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, } return true } - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go similarity index 76% rename from Godeps/_workspace/src/github.com/gorilla/mux/regexp.go rename to vendor/github.com/gorilla/mux/regexp.go index 06728dd5..a6305483 100644 --- a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -10,7 +10,6 @@ import ( "net/http" "net/url" "regexp" - "strconv" "strings" ) @@ -35,7 +34,8 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash // Now let's parse it. defaultPattern := "[^/]+" if matchQuery { - defaultPattern = "[^?&]*" + defaultPattern = "[^?&]+" + matchPrefix = true } else if matchHost { defaultPattern = "[^.]+" matchPrefix = false @@ -53,7 +53,9 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") - pattern.WriteByte('^') + if !matchQuery { + pattern.WriteByte('^') + } reverse := bytes.NewBufferString("") var end int var err error @@ -73,14 +75,12 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - varIdx := i / 2 - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt) + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) - // Append variable name and compiled pattern. 
- varsN[varIdx] = name - varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) if err != nil { return nil, err } @@ -91,12 +91,6 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if strictSlash { pattern.WriteString("[/]?") } - if matchQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } if !matchPrefix { pattern.WriteByte('$') } @@ -147,7 +141,7 @@ type routeRegexp struct { func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { if r.matchQuery { - return r.matchQueryString(req) + return r.regexp.MatchString(req.URL.RawQuery) } else { return r.regexp.MatchString(req.URL.Path) } @@ -156,7 +150,11 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { } // url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { +func (r *routeRegexp) url(pairs ...string) (string, error) { + values, err := mapFromPairs(pairs...) + if err != nil { + return "", err + } urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] @@ -181,26 +179,6 @@ func (r *routeRegexp) url(values map[string]string) (string, error) { return rv, nil } -// getUrlQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getUrlQuery(req *http.Request) string { - if !r.matchQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getUrlQuery(req)) -} - // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { @@ -226,11 +204,6 @@ func braceIndices(s string) ([]int, error) { return idxs, nil } -// varGroupName builds a capturing group name for the indexed variable. 
-func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- @@ -248,13 +221,8 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) if v.host != nil { hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) if hostVars != nil { - subexpNames := v.host.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.host.varsN[varName]] = hostVars[i+1] - varName++ - } + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] } } } @@ -262,13 +230,8 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) if v.path != nil { pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) if pathVars != nil { - subexpNames := v.path.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.path.varsN[varName]] = pathVars[i+1] - varName++ - } + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] } // Check if we should redirect. if v.path.strictSlash { @@ -287,16 +250,12 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } } // Store query string variables. + rawQuery := req.URL.RawQuery for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) + queryVars := q.regexp.FindStringSubmatch(rawQuery) if queryVars != nil { - subexpNames := q.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[q.varsN[varName]] = queryVars[i+1] - varName++ - } + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] } } } diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go similarity index 83% rename from Godeps/_workspace/src/github.com/gorilla/mux/route.go rename to vendor/github.com/gorilla/mux/route.go index 913432c1..c310e66b 100644 --- a/Godeps/_workspace/src/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -9,7 +9,6 @@ import ( "fmt" "net/http" "net/url" - "regexp" "strings" ) @@ -32,8 +31,6 @@ type Route struct { name string // Error resulted from building a route. err error - - buildVarsFunc BuildVarsFunc } // Match matches the route against the request. @@ -189,7 +186,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery type headerMatcher map[string]string func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) + return matchMap(m, r.Header, true) } // Headers adds a matcher for request header values. @@ -200,45 +197,22 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. +// +// If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) + headers, r.err = mapFromPairs(pairs...) 
return r.addMatcher(headerMatcher(headers)) } return r } -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// Regular expressions can be used with headers as well. -// It accepts a sequence of key/value pairs, where the value has regex support. For example -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") - -// The above route will only match if both the request header matches both regular expressions. -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - // Host ----------------------------------------------------------------------- // Host adds a matcher for the URL host. // It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next dot. // @@ -247,7 +221,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // For example: // // r := mux.NewRouter() -// r.Host("www.example.com") +// r.Host("www.domain.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // @@ -296,7 +270,7 @@ func (r *Route) Methods(methods ...string) *Route { // Path adds a matcher for the URL path. // It accepts a template with zero or more URL variables enclosed by {}. The // template must start with a "/". -// Variables can define an optional regexp pattern to be matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -347,7 +321,7 @@ func (r *Route) PathPrefix(tpl string) *Route { // // If the value is an empty string, it will match any value if the key is set. // -// Variables can define an optional regexp pattern to be matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -360,7 +334,7 @@ func (r *Route) Queries(pairs ...string) *Route { return nil } for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { return r } } @@ -386,19 +360,6 @@ func (r *Route) Schemes(schemes ...string) *Route { return r.addMatcher(schemeMatcher(schemes)) } -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f - return r -} - // Subrouter ------------------------------------------------------------------ // Subrouter creates a subrouter for the route. 
@@ -406,7 +367,7 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // It will test the inner routes only if the parent route matched. For example: // // r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() +// s := r.Host("www.domain.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) @@ -461,20 +422,17 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.regexp == nil { return nil, errors.New("mux: route doesn't have a host or path") } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } var scheme, host, path string + var err error if r.regexp.host != nil { // Set a default scheme. scheme = "http" - if host, err = r.regexp.host.url(values); err != nil { + if host, err = r.regexp.host.url(pairs...); err != nil { return nil, err } } if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { + if path, err = r.regexp.path.url(pairs...); err != nil { return nil, err } } @@ -495,11 +453,7 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) + host, err := r.regexp.host.url(pairs...) if err != nil { return nil, err } @@ -519,11 +473,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) + path, err := r.regexp.path.url(pairs...) if err != nil { return nil, err } @@ -532,26 +482,6 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { }, nil } -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} - // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -560,7 +490,6 @@ func (r *Route) buildVars(m map[string]string) map[string]string { type parentRoute interface { getNamedRoutes() map[string]*Route getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string } // getNamedRoutes returns the map where named routes are registered. 
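Taken together, the route.go changes above pin the vendored mux to an older, smaller API: HeadersRegexp, BuildVarsFunc, Walk, and prepareVars are all absent from this copy, so code written against it must stay within the plain matcher surface. A hedged sketch of what remains available (the handler is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// productsHandler is illustrative only.
func productsHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "products")
}

func main() {
	r := mux.NewRouter()
	// Exact-string header matching is still present; regexp header
	// matching (HeadersRegexp) is not in this vendored copy.
	r.HandleFunc("/products", productsHandler).
		Host("www.domain.com").
		Methods("GET").
		Schemes("http").
		Headers("X-Requested-With", "XMLHttpRequest")
	log.Fatal(http.ListenAndServe(":8080", r))
}
```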
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE rename to vendor/github.com/opencontainers/runc/LICENSE diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS similarity index 100% rename from Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS rename to vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go similarity index 100% rename from Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go rename to vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go rename to vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go similarity index 100% rename from Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go rename to vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go similarity index 100% rename from Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go rename to vendor/github.com/opencontainers/runc/libcontainer/user/user.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE rename to vendor/github.com/vbatts/tar-split/LICENSE diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/common.go rename to vendor/github.com/vbatts/tar-split/archive/tar/common.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/reader.go rename to vendor/github.com/vbatts/tar-split/archive/tar/reader.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go rename to vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go diff --git 
a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go rename to vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go rename to vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/archive/tar/writer.go rename to vendor/github.com/vbatts/tar-split/archive/tar/writer.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/README.md b/vendor/github.com/vbatts/tar-split/tar/asm/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/README.md rename to vendor/github.com/vbatts/tar-split/tar/asm/README.md diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/assemble.go rename to vendor/github.com/vbatts/tar-split/tar/asm/assemble.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/disassemble.go rename to vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/doc.go b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/asm/doc.go rename to vendor/github.com/vbatts/tar-split/tar/asm/doc.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/doc.go b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/doc.go rename to vendor/github.com/vbatts/tar-split/tar/storage/doc.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/entry.go rename to vendor/github.com/vbatts/tar-split/tar/storage/entry.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/getter.go rename to vendor/github.com/vbatts/tar-split/tar/storage/getter.go diff --git a/Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go similarity index 100% rename from Godeps/_workspace/src/github.com/vbatts/tar-split/tar/storage/packer.go rename to vendor/github.com/vbatts/tar-split/tar/storage/packer.go diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE 
b/vendor/golang.org/x/net/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE rename to vendor/golang.org/x/net/LICENSE diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go similarity index 99% rename from Godeps/_workspace/src/golang.org/x/net/context/context.go rename to vendor/golang.org/x/net/context/context.go index 11bd8d34..e7ee376c 100644 --- a/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. -package context +package context // import "golang.org/x/net/context" import ( "errors" @@ -189,7 +189,7 @@ func Background() Context { } // TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the +// it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine // whether Contexts are propagated correctly in a program.
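The vendored golang.org/x/net/context is otherwise untouched (99% similarity); only the canonical import comment is added. For completeness, a minimal sketch of typical use of this package; the operation and its durations are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// slowOp simulates work that respects cancellation; the 100ms duration
// is arbitrary.
func slowOp(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// A 50ms deadline, so slowOp is cancelled before it can finish.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(slowOp(ctx)) // context deadline exceeded
}
```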