diff --git a/examples/gcp.yml b/examples/gcp.yml
index 524ba5986..12fedb14b 100644
--- a/examples/gcp.yml
+++ b/examples/gcp.yml
@@ -5,25 +5,25 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
- name: binfmt
image: "mobylinux/binfmt:bdb754f25a5d851b4f5f8d185a43dfcbb3c22d01"
binds:
- /proc/sys/fs/binfmt_misc:/binfmt_misc
- read_only: true
+ readonly: true
command: [/usr/bin/binfmt, -dir, /etc/binfmt.d/, -mount, /binfmt_misc]
- name: metadata-gcp
image: "mobylinux/metadata-gcp:7fc3dd5ef92e0408fb3f76048bbaae88bbb55ad9"
binds:
- /tmp:/etc/ssh
- /etc/resolv.conf:/etc/resolv.conf
- read_only: true
- network_mode: host
+ readonly: true
+ net: host
uts: host
capabilities:
- CAP_SYS_ADMIN
@@ -32,8 +32,8 @@ daemon:
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: sshd
image: "mobylinux/sshd:4f8452ddaff703416fd7452fcd9693b96b23e847"
@@ -45,11 +45,11 @@ daemon:
- CAP_DAC_OVERRIDE
- CAP_SYS_CHROOT
- CAP_KILL
- network_mode: host
+ net: host
+ pid: host
binds:
- /tmp/authorized_keys:/root/.ssh/authorized_keys
- /etc/resolv.conf:/etc/resolv.conf
- pid: host
- name: nginx
image: "nginx:alpine"
capabilities:
@@ -58,7 +58,7 @@ daemon:
- CAP_SETUID
- CAP_SETGID
- CAP_DAC_OVERRIDE
- network_mode: host
+ net: host
files:
- path: etc/docker/daemon.json
contents: '{"debug": true}'
diff --git a/examples/sshd.yml b/examples/sshd.yml
index b40fb9218..ba4b270ac 100644
--- a/examples/sshd.yml
+++ b/examples/sshd.yml
@@ -5,7 +5,7 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
@@ -20,7 +20,7 @@ daemon:
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
+ oomScoreAdj: -800
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: sshd
image: "mobylinux/sshd:4f8452ddaff703416fd7452fcd9693b96b23e847"
@@ -32,11 +32,11 @@ daemon:
- CAP_DAC_OVERRIDE
- CAP_SYS_CHROOT
- CAP_KILL
- network_mode: host
+ net: host
+ pid: host
binds:
- /root/.ssh:/root/.ssh
- /etc/resolv.conf:/etc/resolv.conf
- pid: host
files:
- path: root/.ssh/authorized_keys
contents: '#your ssh key here'
diff --git a/examples/vmware.yml b/examples/vmware.yml
index 0e26d8b1f..01e8823b0 100644
--- a/examples/vmware.yml
+++ b/examples/vmware.yml
@@ -5,25 +5,25 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
- name: binfmt
image: "mobylinux/binfmt:bdb754f25a5d851b4f5f8d185a43dfcbb3c22d01"
binds:
- /proc/sys/fs/binfmt_misc:/binfmt_misc
- read_only: true
+ readonly: true
command: [/usr/bin/binfmt, -dir, /etc/binfmt.d/, -mount, /binfmt_misc]
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: nginx
image: "nginx:alpine"
@@ -33,7 +33,7 @@ daemon:
- CAP_SETUID
- CAP_SETGID
- CAP_DAC_OVERRIDE
- network_mode: host
+ net: host
files:
- path: etc/docker/daemon.json
contents: '{"debug": true}'
diff --git a/moby.yml b/moby.yml
index 68f0b6e10..8fab7a2e2 100644
--- a/moby.yml
+++ b/moby.yml
@@ -5,25 +5,25 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
- name: binfmt
image: "mobylinux/binfmt:bdb754f25a5d851b4f5f8d185a43dfcbb3c22d01"
binds:
- /proc/sys/fs/binfmt_misc:/binfmt_misc
- read_only: true
+ readonly: true
command: [/usr/bin/binfmt, -dir, /etc/binfmt.d/, -mount, /binfmt_misc]
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: nginx
image: "nginx:alpine"
@@ -33,7 +33,7 @@ daemon:
- CAP_SETUID
- CAP_SETGID
- CAP_DAC_OVERRIDE
- network_mode: host
+ net: host
files:
- path: etc/docker/daemon.json
contents: '{"debug": true}'
diff --git a/projects/miragesdk/examples/mirage-dhcp.yml b/projects/miragesdk/examples/mirage-dhcp.yml
index fb93fbcd9..575cf079c 100644
--- a/projects/miragesdk/examples/mirage-dhcp.yml
+++ b/projects/miragesdk/examples/mirage-dhcp.yml
@@ -5,28 +5,28 @@ init: "mobylinux/init:3024f1eaf8779691229d661791607aade4df855d"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
- name: binfmt
image: "mobylinux/binfmt:bdb754f25a5d851b4f5f8d185a43dfcbb3c22d01"
binds:
- /proc/sys/fs/binfmt_misc:/binfmt_misc
- read_only: true
+ readonly: true
command: [/usr/bin/binfmt, -dir, /etc/binfmt.d/, -mount, /binfmt_misc]
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: dhcp-client
- network_mode: host
+ net: host
image: "mobylinux/dhcp-client:f40cafe2ade4b115704750a85d21eb35b1116b91"
capabilities:
- CAP_NET_ADMIN # to bring eth0 up
@@ -34,7 +34,7 @@ daemon:
binds:
- /var/run/dhcp-client:/data
command: [/dhcp-client, -vv]
- read_only: true
+ readonly: true
files:
- path: /var/run/dhcp-client/README
contents: 'data for dhcp-client'
diff --git a/projects/selinux/selinux.yml b/projects/selinux/selinux.yml
index 287964ecf..36e5fe05e 100644
--- a/projects/selinux/selinux.yml
+++ b/projects/selinux/selinux.yml
@@ -5,19 +5,19 @@ init: "mobylinux/init:b5249a412536b4e69f8e1f668680d2ae185cc505"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
files:
- path: etc/docker/daemon.json
diff --git a/projects/wireguard/examples/wireguard.yml b/projects/wireguard/examples/wireguard.yml
index cda588be4..4d8b06403 100644
--- a/projects/wireguard/examples/wireguard.yml
+++ b/projects/wireguard/examples/wireguard.yml
@@ -5,19 +5,19 @@ init: "mobylinux/init-wireguard:4309fb8b65cafa9e07b0e75d86a0bff4070e67e9"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
files:
- path: etc/docker/daemon.json
diff --git a/src/cmd/moby/build.go b/src/cmd/moby/build.go
index 5fffb34f6..17d7a3350 100644
--- a/src/cmd/moby/build.go
+++ b/src/cmd/moby/build.go
@@ -117,7 +117,7 @@ func buildInternal(name string, pull bool, conf string) {
log.Infof(" Create OCI config for %s", image.Image)
config, err := ConfigToOCI(&image)
if err != nil {
- log.Fatalf("Failed to run riddler to get config.json for %s: %v", image.Image, err)
+ log.Fatalf("Failed to create config.json for %s: %v", image.Image, err)
}
so := fmt.Sprintf("%03d", i)
path := "containers/system/" + so + "-" + image.Name
@@ -141,7 +141,7 @@ func buildInternal(name string, pull bool, conf string) {
log.Infof(" Create OCI config for %s", image.Image)
config, err := ConfigToOCI(&image)
if err != nil {
- log.Fatalf("Failed to run riddler to get config.json for %s: %v", image.Image, err)
+ log.Fatalf("Failed to create config.json for %s: %v", image.Image, err)
}
path := "containers/daemon/" + image.Name
out, err := ImageBundle(path, image.Image, config)
diff --git a/src/cmd/moby/config.go b/src/cmd/moby/config.go
index de0966bff..2ea34e57a 100644
--- a/src/cmd/moby/config.go
+++ b/src/cmd/moby/config.go
@@ -3,13 +3,14 @@ package main
import (
"archive/tar"
"bytes"
+ "encoding/json"
"errors"
"fmt"
"path"
- "strconv"
"strings"
log "github.com/Sirupsen/logrus"
+ "github.com/opencontainers/runtime-spec/specs-go"
"gopkg.in/yaml.v2"
)
@@ -36,23 +37,31 @@ type Moby struct {
}
}
-// MobyImage is the type of an image config, based on Compose
+// MobyImage is the type of an image config
type MobyImage struct {
- Name string
- Image string
- Capabilities []string
- Binds []string
- OomScoreAdj int64 `yaml:"oom_score_adj"`
- Command []string
- NetworkMode string `yaml:"network_mode"`
- Pid string
- Ipc string
- Uts string
- ReadOnly bool `yaml:"read_only"`
+ Name string
+ Image string
+ Capabilities []string
+ Mounts []specs.Mount
+ Binds []string
+ Tmpfs []string
+ Args []string
+ Env []string
+ Cwd string
+ Net string
+ Pid string
+ Ipc string
+ Uts string
+ Readonly bool
+ UID uint32 `yaml:"uid"`
+ GID uint32 `yaml:"gid"`
+ AdditionalGids []uint32 `yaml:"additionalGids"`
+ NoNewPrivileges bool `yaml:"noNewPrivileges"`
+ Hostname string
+ OomScoreAdj int `yaml:"oomScoreAdj"`
+ DisableOOMKiller bool `yaml:"disableOOMKiller"`
}
-const riddler = "mobylinux/riddler:decf6c9e24b579175a038a76f9721e7aca507abd@sha256:9d24a7c48204b94b5d76cc3d6cf70f779d87d08d8a893169292c98d0e19ab579"
-
// NewConfig parses a config file
func NewConfig(config []byte) (*Moby, error) {
m := Moby{}
@@ -66,53 +75,183 @@ func NewConfig(config []byte) (*Moby, error) {
}
// ConfigToOCI converts a config specification to an OCI config file
-func ConfigToOCI(image *MobyImage) (string, error) {
- // riddler arguments
- args := []string{"-v", "/var/run/docker.sock:/var/run/docker.sock", riddler, image.Image}
- // docker arguments
- args = append(args, "--cap-drop", "all")
- for _, cap := range image.Capabilities {
- if strings.ToUpper(cap)[0:4] == "CAP_" {
- cap = cap[4:]
- }
- args = append(args, "--cap-add", cap)
- }
- if image.OomScoreAdj != 0 {
- args = append(args, "--oom-score-adj", strconv.FormatInt(image.OomScoreAdj, 10))
- }
- if image.NetworkMode != "" {
- // TODO only "host" supported
- args = append(args, "--net="+image.NetworkMode)
- }
- if image.Pid != "" {
- // TODO only "host" supported
- args = append(args, "--pid="+image.Pid)
- }
- if image.Ipc != "" {
- // TODO only "host" supported
- args = append(args, "--ipc="+image.Ipc)
- }
- if image.Uts != "" {
- // TODO only "host" supported
- args = append(args, "--uts="+image.Uts)
- }
- for _, bind := range image.Binds {
- args = append(args, "-v", bind)
- }
- if image.ReadOnly {
- args = append(args, "--read-only")
- }
- // image
- args = append(args, image.Image)
- // command
- args = append(args, image.Command...)
+func ConfigToOCI(image *MobyImage) ([]byte, error) {
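+ // build the OCI runtime spec directly from the inspected image metadata and the yaml settings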
+ oci := specs.Spec{}
- config, err := dockerRun(args...)
+ // TODO pass through same docker client to all functions
+ cli, err := dockerClient()
if err != nil {
- return "", fmt.Errorf("Failed to run riddler to get config.json: %v", err)
+ return []byte{}, err
}
- return string(config), nil
+ inspect, err := dockerInspectImage(cli, image.Image)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ config := inspect.Config
+ if config == nil {
+ return []byte{}, errors.New("empty image config")
+ }
+
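+ // the image config supplies defaults for args, env and cwd; values set in the yaml override them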
+ args := append(config.Entrypoint, config.Cmd...)
+ if len(image.Args) != 0 {
+ args = image.Args
+ }
+ env := config.Env
+ if len(image.Env) != 0 {
+ env = image.Env
+ }
+ cwd := config.WorkingDir
+ if image.Cwd != "" {
+ cwd = image.Cwd
+ }
+ if cwd == "" {
+ cwd = "/"
+ }
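+ // mount options for the standard pseudo-filesystems; /dev and /sys are also made read-only for read-only containers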
+ devOptions := []string{"nosuid", "strictatime", "mode=755", "size=65536k"}
+ if image.Readonly {
+ devOptions = append(devOptions, "ro")
+ }
+ ptsOptions := []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"}
+ sysOptions := []string{"nosuid", "noexec", "nodev"}
+ if image.Readonly {
+ sysOptions = append(sysOptions, "ro")
+ }
+ cgroupOptions := []string{"nosuid", "noexec", "nodev", "relatime", "ro"}
+ // note omits "standard" /dev/shm and /dev/mqueue
+ mounts := []specs.Mount{
+ {Destination: "/proc", Type: "proc", Source: "proc"},
+ {Destination: "/dev", Type: "tmpfs", Source: "tmpfs", Options: devOptions},
+ {Destination: "/dev/pts", Type: "devpts", Source: "devpts", Options: ptsOptions},
+ {Destination: "/sys", Type: "sysfs", Source: "sysfs", Options: sysOptions},
+ {Destination: "/sys/fs/cgroup", Type: "cgroup", Source: "cgroup", Options: cgroupOptions},
+ }
+ // TODO if any standard mount points supplied, remove from above, so can change options
+ mounts = append(mounts, image.Mounts...)
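+ // tmpfs entries take the form "dest" or "dest:opt1,opt2"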
+ for _, t := range image.Tmpfs {
+ parts := strings.Split(t, ":")
+ if len(parts) > 2 {
+ return []byte{}, fmt.Errorf("Cannot parse tmpfs, too many ':': %s", t)
+ }
+ dest := parts[0]
+ opts := []string{}
+ if len(parts) == 2 {
+ opts = strings.Split(parts[1], ",")
+ }
+ mounts = append(mounts, specs.Mount{Destination: dest, Type: "tmpfs", Source: "tmpfs", Options: opts})
+ }
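+ // binds take the form "src:dest" or "src:dest:opt1,opt2", defaulting to a read-write recursive private bind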
+ for _, b := range image.Binds {
+ parts := strings.Split(b, ":")
+ if len(parts) < 2 {
+ return []byte{}, fmt.Errorf("Cannot parse bind, missing ':': %s", b)
+ }
+ if len(parts) > 3 {
+ return []byte{}, fmt.Errorf("Cannot parse bind, too many ':': %s", b)
+ }
+ src := parts[0]
+ dest := parts[1]
+ opts := []string{"rw", "rbind", "rprivate"}
+ if len(parts) == 3 {
+ opts = strings.Split(parts[2], ",")
+ }
+ mounts = append(mounts, specs.Mount{Destination: dest, Type: "bind", Source: src, Options: opts})
+ }
+
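+ // each namespace type defaults to a fresh namespace; only "host" (share the host namespace) is accepted as an override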
+ namespaces := []specs.LinuxNamespace{}
+ if image.Net != "" && image.Net != "host" {
+ return []byte{}, fmt.Errorf("invalid net namespace: %s", image.Net)
+ }
+ if image.Net == "" {
+ namespaces = append(namespaces, specs.LinuxNamespace{Type: specs.NetworkNamespace})
+ }
+ if image.Pid != "" && image.Pid != "host" {
+ return []byte{}, fmt.Errorf("invalid pid namespace: %s", image.Pid)
+ }
+ if image.Pid == "" {
+ namespaces = append(namespaces, specs.LinuxNamespace{Type: specs.PIDNamespace})
+ }
+ if image.Ipc != "" && image.Ipc != "host" {
+ return []byte{}, fmt.Errorf("invalid ipc namespace: %s", image.Ipc)
+ }
+ if image.Ipc == "" {
+ namespaces = append(namespaces, specs.LinuxNamespace{Type: specs.IPCNamespace})
+ }
+ if image.Uts != "" && image.Uts != "host" {
+ return []byte{}, fmt.Errorf("invalid uts namespace: %s", image.Uts)
+ }
+ if image.Uts == "" {
+ namespaces = append(namespaces, specs.LinuxNamespace{Type: specs.UTSNamespace})
+ }
+ // TODO user, cgroup namespaces, maybe mount=host if useful
+ namespaces = append(namespaces, specs.LinuxNamespace{Type: specs.MountNamespace})
+
+ oci.Version = specs.Version
+
+ oci.Platform = specs.Platform{
+ OS: inspect.Os,
+ Arch: inspect.Architecture,
+ }
+
+ oci.Process = specs.Process{
+ Terminal: false,
+ //ConsoleSize
+ User: specs.User{
+ UID: image.UID,
+ GID: image.GID,
+ AdditionalGids: image.AdditionalGids,
+ // Username (Windows)
+ },
+ Args: args,
+ Env: env,
+ Cwd: cwd,
+ Capabilities: &specs.LinuxCapabilities{
+ Bounding: image.Capabilities,
+ Effective: image.Capabilities,
+ Inheritable: image.Capabilities,
+ Permitted: image.Capabilities,
+ Ambient: []string{},
+ },
+ Rlimits: []specs.LinuxRlimit{},
+ NoNewPrivileges: image.NoNewPrivileges,
+ // ApparmorProfile
+ // SelinuxLabel
+ }
+
+ oci.Root = specs.Root{
+ Path: "rootfs",
+ Readonly: image.Readonly,
+ }
+
+ oci.Hostname = image.Hostname
+ oci.Mounts = mounts
+
+ oci.Linux = &specs.Linux{
+ // UIDMappings
+ // GIDMappings
+ // Sysctl
+ Resources: &specs.LinuxResources{
+ // Devices
+ DisableOOMKiller: &image.DisableOOMKiller,
+ // Memory
+ // CPU
+ // Pids
+ // BlockIO
+ // HugepageLimits
+ // Network
+ },
+ // CgroupsPath
+ Namespaces: namespaces,
+ // Devices
+ // Seccomp
+ // RootfsPropagation
+ // MaskedPaths
+ // ReadonlyPaths
+ // MountLabel
+ // IntelRdt
+ }
+
+ return json.MarshalIndent(oci, "", " ")
}
func filesystem(m *Moby) (*bytes.Buffer, error) {
diff --git a/src/cmd/moby/docker.go b/src/cmd/moby/docker.go
index 096f11347..a10de3e1a 100644
--- a/src/cmd/moby/docker.go
+++ b/src/cmd/moby/docker.go
@@ -8,10 +8,14 @@ import (
"fmt"
"io"
"io/ioutil"
+ "os"
"os/exec"
"strings"
log "github.com/Sirupsen/logrus"
+ "github.com/docker/engine-api/client"
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
)
func dockerRun(args ...string) ([]byte, error) {
@@ -274,3 +278,36 @@ func dockerPull(image string) error {
log.Debugf("docker pull: %s...Done", image)
return nil
}
+
+func dockerClient() (*client.Client, error) {
+ // pin an old API version for maximum compatibility, as we use nothing newer
+ err := os.Setenv("DOCKER_API_VERSION", "1.23")
+ if err != nil {
+ return nil, err
+ }
+ return client.NewEnvClient()
+}
+
+func dockerInspectImage(cli *client.Client, image string) (types.ImageInspect, error) {
+ log.Debugf("docker inspect image: %s", image)
+
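+ // inspect the image locally; if it is not present, pull it and retry once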
+ inspect, _, err := cli.ImageInspectWithRaw(context.Background(), image, false)
+ if err != nil {
+ if client.IsErrImageNotFound(err) {
+ pullErr := dockerPull(image)
+ if pullErr != nil {
+ return types.ImageInspect{}, pullErr
+ }
+ inspect, _, err = cli.ImageInspectWithRaw(context.Background(), image, false)
+ if err != nil {
+ return types.ImageInspect{}, err
+ }
+ } else {
+ return types.ImageInspect{}, err
+ }
+ }
+
+ log.Debugf("docker inspect image: %s...Done", image)
+
+ return inspect, nil
+}
diff --git a/src/cmd/moby/image.go b/src/cmd/moby/image.go
index c7c303e2e..c00f0b528 100644
--- a/src/cmd/moby/image.go
+++ b/src/cmd/moby/image.go
@@ -143,8 +143,8 @@ func imageTar(image, prefix string, tw *tar.Writer) error {
}
// ImageBundle produces an OCI bundle at the given path in a tarball, given an image and a config.json
-func ImageBundle(path, image, config string) ([]byte, error) {
- log.Debugf("image bundle: %s %s cfg: %s", path, image, config)
+func ImageBundle(path string, image string, config []byte) ([]byte, error) {
+ log.Debugf("image bundle: %s %s cfg: %s", path, image, string(config))
out := new(bytes.Buffer)
tw := tar.NewWriter(out)
err := tarPrefix(path+"/rootfs/", tw)
@@ -160,7 +160,7 @@ func ImageBundle(path, image, config string) ([]byte, error) {
if err != nil {
return []byte{}, err
}
- buf := bytes.NewBufferString(config)
+ buf := bytes.NewBuffer(config)
_, err = io.Copy(tw, buf)
if err != nil {
return []byte{}, err
diff --git a/test/ltp/test-ltp.yml b/test/ltp/test-ltp.yml
index ab8261c96..911c3c67b 100644
--- a/test/ltp/test-ltp.yml
+++ b/test/ltp/test-ltp.yml
@@ -5,7 +5,7 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: ltp
image: "mobylinux/test-ltp-20170116:fdca2d1bb019b1d51e722e6032c82c7933d4b870"
- network_mode: host
+ net: host
pid: host
capabilities:
- CAP_SYS_ADMIN
diff --git a/test/test.yml b/test/test.yml
index e077eda33..3994d5409 100644
--- a/test/test.yml
+++ b/test/test.yml
@@ -7,14 +7,14 @@ system:
image: "mobylinux/binfmt:bdb754f25a5d851b4f5f8d185a43dfcbb3c22d01"
binds:
- /proc/sys/fs/binfmt_misc:/binfmt_misc
- read_only: true
+ readonly: true
command: [/usr/bin/binfmt, -dir, /etc/binfmt.d/, -mount, /binfmt_misc]
- name: check
image: "mobylinux/check:c9e41ab96b3ea6a3ced97634751e20d12a5bf52f"
pid: host
capabilities:
- CAP_SYS_BOOT
- read_only: true
+ readonly: true
outputs:
- format: kernel+initrd
- format: iso-bios
diff --git a/test/virtsock/test-virtsock-server.yml b/test/virtsock/test-virtsock-server.yml
index a150616cc..1bec9fad3 100644
--- a/test/virtsock/test-virtsock-server.yml
+++ b/test/virtsock/test-virtsock-server.yml
@@ -9,23 +9,23 @@ init: "mobylinux/init:00c3a5bbfd9794f4a3187fcc4a9f0c826c46d474"
system:
- name: sysctl
image: "mobylinux/sysctl:2cf2f9d5b4d314ba1bfc22b2fe931924af666d8c"
- network_mode: host
+ net: host
pid: host
ipc: host
capabilities:
- CAP_SYS_ADMIN
- read_only: true
+ readonly: true
daemon:
- name: rngd
image: "mobylinux/rngd:3dad6dd43270fa632ac031e99d1947f20b22eec9@sha256:1c93c1db7196f6f71f8e300bc1d15f0376dd18e8891c8789d77c8ff19f3a9a92"
capabilities:
- CAP_SYS_ADMIN
- oom_score_adj: -800
- read_only: true
+ oomScoreAdj: -800
+ readonly: true
command: [/bin/tini, /usr/sbin/rngd, -f]
- name: virtsock-server
image: "mobylinux/test-virtsock:35fea96fd01f6edb67021c494ddf098fdb8bbca0"
- read_only: true
+ readonly: true
command: [/bin/tini, /bin/virtsock_stress, -s, -v, 1]
outputs:
diff --git a/tools/riddler/Dockerfile b/tools/riddler/Dockerfile
deleted file mode 100644
index ae7fd9de0..000000000
--- a/tools/riddler/Dockerfile
+++ /dev/null
@@ -1,24 +0,0 @@
-FROM golang:1.8-alpine
-
-RUN \
- apk update && apk upgrade && \
- apk add \
- docker \
- gcc \
- git \
- jq \
- linux-headers \
- musl-dev \
- tar \
- && true
-
-COPY Dockerfile /
-COPY riddler.sh /usr/bin/
-
-RUN git clone https://github.com/jessfraz/riddler.git /go/src/github.com/jessfraz/riddler
-
-WORKDIR /go/src/github.com/jessfraz/riddler
-RUN git checkout 23befa0b232877b5b502b828e24161d801bd67f6
-RUN go build -o /usr/bin/riddler .
-
-ENTRYPOINT ["/usr/bin/riddler.sh"]
diff --git a/tools/riddler/Makefile b/tools/riddler/Makefile
deleted file mode 100644
index 1e9aca79a..000000000
--- a/tools/riddler/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-.PHONY: tag push
-
-BASE=golang:1.8-alpine
-IMAGE=riddler
-
-default: push
-
-hash: Dockerfile riddler.sh
- DOCKER_CONTENT_TRUST=1 docker pull $(BASE)
- tar cf - $^ | docker build --no-cache -t $(IMAGE):build -
- docker run --entrypoint=/bin/sh --rm $(IMAGE):build -c 'cat /Dockerfile /usr/bin/riddler.sh /lib/apk/db/installed | sha1sum' | sed 's/ .*//' > $@
-
-push: hash
- docker pull mobylinux/$(IMAGE):$(shell cat hash) || \
- (docker tag $(IMAGE):build mobylinux/$(IMAGE):$(shell cat hash) && \
- docker push mobylinux/$(IMAGE):$(shell cat hash))
- docker rmi $(IMAGE):build
- rm -f hash
-
-tag: hash
- docker pull mobylinux/$(IMAGE):$(shell cat hash) || \
- docker tag $(IMAGE):build mobylinux/$(IMAGE):$(shell cat hash)
- docker rmi $(IMAGE):build
- rm -f hash
-
-clean:
- rm -f hash
-
-.DELETE_ON_ERROR:
diff --git a/tools/riddler/riddler.sh b/tools/riddler/riddler.sh
deleted file mode 100755
index 6adf606f3..000000000
--- a/tools/riddler/riddler.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# arguments are image name, prefix, then arguments passed to Docker
-# eg ./riddler.sh alpine:3.4 --read-only alpine:3.4 ls
-# This script will output config.json
-
-IMAGE="$1"; shift
-
-cd /tmp
-
-# riddler always adds the apparmor options if this is not present
-EXTRA_OPTIONS="--security-opt apparmor=unconfined"
-
-ARGS="$@"
-CONTAINER=$(docker create $EXTRA_OPTIONS $ARGS)
-riddler $CONTAINER > /dev/null
-docker rm $CONTAINER > /dev/null
-
-# unfixed known issues
-# noNewPrivileges is always set by riddler
-
-# These fixes should be removed when riddler is fixed
-# process.rlimits, just a constant at present, not useful
-# memory swappiness is too big by default
-# remove user namespaces
-# --read-only sets /dev ro
-# /sysfs ro unless privileged - cannot detect so will do if grant all caps
-# ipc, uts namespaces always isolated
-
-UTS="."
-IPC="."
-echo $ARGS | grep -q uts=host && UTS=".linux.namespaces = (.linux.namespaces|map(select(.type!=\"uts\")))"
-echo $ARGS | grep -q ipc=host && IPC=".linux.namespaces = (.linux.namespaces|map(select(.type!=\"ipc\")))"
-
-mv config.json config.json.orig
-cat config.json.orig | \
- jq "$UTS" | \
- jq "$IPC" | \
- jq 'del(.process.rlimits)' | \
- jq 'del (.linux.resources.memory.swappiness)' | \
- jq 'del(.linux.uidMappings) | del(.linux.gidMappings) | .linux.namespaces = (.linux.namespaces|map(select(.type!="user")))' | \
- jq 'if .root.readonly==true then .mounts = (.mounts|map(if .destination=="/dev" then .options |= .+ ["ro"] else . end)) else . end' | \
- jq '.mounts = if .process.capabilities | length != 38 then (.mounts|map(if .destination=="/sys" then .options |= .+ ["ro"] else . end)) else . end' | \
- jq '.process.capabilities = { bounding: .process.capabilities, effective: .process.capabilities, inheritable: .process.capabilities, permitted: .process.capabilities }' \
- > config.json
-
-cat config.json
diff --git a/vendor.conf b/vendor.conf
index 70b3c4354..3f4adccb7 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -4,6 +4,10 @@ github.com/Masterminds/sprig 01a849f546a584d7b29bfee253e7db0aed44f7ba
github.com/Sirupsen/logrus 10f801ebc38b33738c9d17d50860f484a0988ff5
github.com/aokoli/goutils 9c37978a95bd5c709a15883b6242714ea6709e64
github.com/armon/go-radix 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
+github.com/docker/distribution 07f32ac1831ed0fc71960b7da5d6bb83cb6881b5
+github.com/docker/engine-api cf82c64276ebc2501e72b241f9fdc1e21e421743
+github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
+github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
github.com/docker/hyperkit/go 57e91c5bb6655514aa71d00dd1949db891903d34
github.com/docker/infrakit 208d114478ed94ee9015083e13946ca1caaad790
github.com/golang/protobuf/proto c9c7427a2a70d2eb3bafa0ab2dc163e45f143317
@@ -14,6 +18,8 @@ github.com/gorilla/rpc 22c016f3df3febe0c1f6727598b6389507e03a18
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/mitchellh/go-ps 4fdf99ab29366514c69ccccddab5dc58b8d84062
+github.com/opencontainers/runtime-spec d094a5c9c1997ab086197b57e9378fabed394d92
+github.com/pkg/errors ff09b135c25aae272398c51a07235b90a75aa4f0
github.com/rneugeba/iso9660wrap 9c7eaf5ac74b2416be8b7b8d1f35b9af44a6e4fa
github.com/satori/go.uuid b061729afc07e77a8aa4fad0a2fd840958f1942a
github.com/spf13/cobra 7be4beda01ec05d0b93d80b3facd2b6f44080d94
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
new file mode 100644
index 000000000..d35bcb682
--- /dev/null
+++ b/vendor/github.com/docker/distribution/README.md
@@ -0,0 +1,131 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused around security and performance.
+
+
+
+[Build Status](https://circleci.com/gh/docker/distribution/tree/master)
+[GoDoc](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component** |Description |
+|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
+| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
+| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation to a V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allow users to:
+
+* Enjoy an efficient, secured and reliable way to store, manage, package and
+ exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home made solution through good specs, and solid
+ extensions mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used to test or in continuous integration. For these
+use cases and others, [deploying your own registry instance](docs/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator]
+(https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](docs/recipes/building.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+
+| Channel | Where |
+|---------------|-------|
+| IRC | #docker-distribution on FreeNode |
+| Issue Tracker | github.com/docker/distribution/issues |
+| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution |
+| Mailing List | docker@dockerproject.org |
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go
new file mode 100644
index 000000000..31d821bba
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digest.go
@@ -0,0 +1,139 @@
+package digest
+
+import (
+ "fmt"
+ "hash"
+ "io"
+ "regexp"
+ "strings"
+)
+
+const (
+ // DigestSha256EmptyTar is the canonical sha256 digest of empty data
+ DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows to abstract the digest behind this type and work only in those
+// terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+ return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+ return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+ return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+ // ErrDigestInvalidFormat returned when digest format invalid.
+ ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+ // ErrDigestInvalidLength returned when digest has invalid length.
+ ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+ // ErrDigestUnsupported returned when the digest algorithm is unsupported.
+ ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+ d := Digest(s)
+
+ return d, d.Validate()
+}
+
+// FromReader returns the most valid digest for the underlying content using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+ return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+ return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+ s := string(d)
+
+ if !DigestRegexpAnchored.MatchString(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ i := strings.Index(s, ":")
+ if i < 0 {
+ return ErrDigestInvalidFormat
+ }
+
+ // case: "sha256:" with no hex.
+ if i+1 == len(s) {
+ return ErrDigestInvalidFormat
+ }
+
+ switch algorithm := Algorithm(s[:i]); algorithm {
+ case SHA256, SHA384, SHA512:
+ if algorithm.Size()*2 != len(s[i+1:]) {
+ return ErrDigestInvalidLength
+ }
+ break
+ default:
+ return ErrDigestUnsupported
+ }
+
+ return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+ return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+ return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+ return string(d)
+}
+
+func (d Digest) sepIndex() int {
+ i := strings.Index(string(d), ":")
+
+ if i < 0 {
+ panic("could not find ':' in digest: " + d)
+ }
+
+ return i
+}
diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 000000000..f3105a45b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+ "crypto"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+ // Canonical is the primary digest algorithm used with the distribution
+ // project. Other digests may be used but this one is the primary storage
+ // digest.
+ Canonical = SHA256
+)
+
+var (
+ // TODO(stevvooe): Follow the pattern of the standard crypto package for
+ // registration of digests. Effectively, we are a registerable set and
+ // common symbol access.
+
+ // algorithms maps values to hash.Hash implementations. Other algorithms
+ // may be available but they cannot be calculated by the digest package.
+ algorithms = map[Algorithm]crypto.Hash{
+ SHA256: crypto.SHA256,
+ SHA384: crypto.SHA384,
+ SHA512: crypto.SHA512,
+ }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+ h, ok := algorithms[a]
+ if !ok {
+ return false
+ }
+
+ // check availability of the hash, as well
+ return h.Available()
+}
+
+func (a Algorithm) String() string {
+ return string(a)
+}
+
+// Size returns number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+ h, ok := algorithms[a]
+ if !ok {
+ return 0
+ }
+ return h.Size()
+}
+
+// Set implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+ if value == "" {
+ *a = Canonical
+ } else {
+ // just do a type conversion, support is queried with Available.
+ *a = Algorithm(value)
+ }
+
+ return nil
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+ return &digester{
+ alg: a,
+ hash: a.Hash(),
+ }
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+ if !a.Available() {
+ // NOTE(stevvooe): A missing hash is usually a programming error that
+ // must be resolved at compile time. We don't import in the digest
+ // package to allow users to choose their hash implementation (such as
+ // when using stevvooe/resumable or a hardware accelerated package).
+ //
+ // Applications that may want to resolve the hash at runtime should
+ // call Algorithm.Available before call Algorithm.Hash().
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+ }
+
+ return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+ digester := a.New()
+
+ if _, err := io.Copy(digester.Hash(), rd); err != nil {
+ return "", err
+ }
+
+ return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+ digester := a.New()
+
+ if _, err := digester.Hash().Write(p); err != nil {
+ // Writes to a Hash should never fail. None of the existing
+ // hash implementations in the stdlib or hashes vendored
+ // here can return errors from Write. Having a panic in this
+ // condition instead of having FromBytes return an error value
+ // avoids unnecessary error handling paths in all callers.
+ panic("write to hash function returned error: " + err.Error())
+ }
+
+ return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+ Hash() hash.Hash // provides direct access to underlying hash instance.
+ Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+ alg Algorithm
+ hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+ return d.hash
+}
+
+func (d *digester) Digest() Digest {
+ return NewDigest(d.alg, d.hash)
+}
diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 000000000..f64b0db32
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,42 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+// <algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go
new file mode 100644
index 000000000..4b9313c1a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,245 @@
+package digest
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg Algorithm
+ hex string
+ )
+ dgst, err := ParseDigest(d)
+ if err == ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An error will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is after or value at idx is not digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length represents the minimum value, the maximum length may be the
+// entire value of digest if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible to be unique.
+func ShortCodeTable(dst *Set, length int) map[Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
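+
+// For example, with a set holding a single sha256 digest and a requested
+// length of 6, the returned table would typically map that digest to the
+// first six characters of its hex value; prefixes that collide with later
+// entries are extended until they become unique.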
+
+type digestEntry struct {
+ alg Algorithm
+ val string
+ digest Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go
new file mode 100644
index 000000000..9af3be134
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/verifiers.go
@@ -0,0 +1,44 @@
+package digest
+
+import (
+ "hash"
+ "io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it then check
+// the result with the Verified method.
+type Verifier interface {
+ io.Writer
+
+ // Verified will return true if the content written to Verifier matches
+ // the digest.
+ Verified() bool
+}
+
+// NewDigestVerifier returns a verifier that compares the written bytes
+// against a passed in digest.
+func NewDigestVerifier(d Digest) (Verifier, error) {
+ if err := d.Validate(); err != nil {
+ return nil, err
+ }
+
+ return hashVerifier{
+ hash: d.Algorithm().Hash(),
+ digest: d,
+ }, nil
+}
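+
+// A typical use, where expected is a known Digest and r is an io.Reader
+// over the content under test:
+//
+//   v, err := NewDigestVerifier(expected)
+//   if err != nil {
+//       // handle invalid digest
+//   }
+//   if _, err := io.Copy(v, r); err != nil {
+//       // handle read error
+//   }
+//   if !v.Verified() {
+//       // content does not match the expected digest
+//   }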
+
+type hashVerifier struct {
+ digest Digest
+ hash hash.Hash
+}
+
+func (hv hashVerifier) Write(p []byte) (n int, err error) {
+ return hv.hash.Write(p)
+}
+
+func (hv hashVerifier) Verified() bool {
+ return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
+}
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 000000000..bb09fa25d
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,334 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [hostname '/'] component ['/' component]*
+// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
+// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/docker/distribution/digest"
+)
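+
+// As an illustration of the grammar above, a fully qualified reference such
+// as "docker.io/library/ubuntu:14.04@sha256:..." (digest hex elided) has a
+// hostname ("docker.io"), a two-component name ("library/ubuntu"), a tag
+// ("14.04") and a digest; Parse returns the most specific reference type
+// that the string supports.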
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including hostname and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+func SplitHostname(named Named) (string, string) {
+ name := named.Name()
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
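+
+// For example, splitting a reference named "docker.io/library/ubuntu" would
+// yield ("docker.io", "library/ubuntu"), while a bare name such as "ubuntu"
+// has no hostname and is returned whole as the name.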
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ // TODO(dmcgowan): Provide more specific and helpful error
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ ref := reference{
+ name: matches[1],
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.ParseDigest(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name, otherwise an error is
+// returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ ref, err := Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+ if !anchoredNameRegexp.MatchString(name) {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository(name), nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ return taggedReference{
+ name: name.Name(),
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ return canonicalReference{
+ name: name.Name(),
+ digest: digest,
+ }, nil
+}
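+
+// A short sketch of building a tagged reference (error handling elided,
+// values illustrative):
+//
+//   named, _ := WithName("docker.io/library/ubuntu")
+//   tagged, _ := WithTag(named, "14.04")
+//   fmt.Println(tagged.String())   // docker.io/library/ubuntu:14.04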
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.name == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ name: ref.name,
+ digest: ref.digest,
+ }
+ }
+ return repository(ref.name)
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ name: ref.name,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ name string
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.name + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Name() string {
+ return r.name
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository string
+
+func (r repository) String() string {
+ return string(r)
+}
+
+func (r repository) Name() string {
+ return string(r)
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return string(d)
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ name string
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.name + ":" + t.tag
+}
+
+func (t taggedReference) Name() string {
+ return t.name
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ name string
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.name + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Name() string {
+ return c.name
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 000000000..9a7d366bc
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,124 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores and
+ // multiple dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscores and multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // hostnameComponentRegexp restricts the registry hostname component of a
+ // repository name to start with a component as defined by hostnameRegexp
+ // and followed by an optional port.
+ hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // hostnameRegexp defines the structure of potential hostname components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ hostnameRegexp = expression(
+ hostnameComponentRegexp,
+ optional(repeated(literal(`.`), hostnameComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the hostname and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(hostnameRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // hostname and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(hostnameRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+)
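+
+// As an example of how the helpers below compose, anchored(capture(TagRegexp))
+// would produce the pattern `^([\w][\w.-]{0,127})$`.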
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE
new file mode 100644
index 000000000..c157bff96
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/engine-api/README.md b/vendor/github.com/docker/engine-api/README.md
new file mode 100644
index 000000000..897dbdf9e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/README.md
@@ -0,0 +1,68 @@
+[GoDoc](https://godoc.org/github.com/docker/engine-api)
+
+# Introduction
+
+Engine-api is a set of Go libraries to implement client and server components compatible with the Docker engine.
+The code was extracted from the [Docker engine](https://github.com/docker/docker) and contributed back as an external library.
+
+## Components
+
+### Client
+
+The client package implements a fully featured http client to interact with the Docker engine. It's modeled after the requirements of the Docker engine CLI, but it can also serve other purposes.
+
+#### Usage
+
+You can use this client package in your applications by creating a new client object. Then use that object to execute operations against the remote server. Follow the example below to see how to list all the containers running in a Docker engine host:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/docker/engine-api/client"
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+func main() {
+ defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
+ cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders)
+ if err != nil {
+ panic(err)
+ }
+
+ options := types.ContainerListOptions{All: true}
+ containers, err := cli.ContainerList(context.Background(), options)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, c := range containers {
+ fmt.Println(c.ID)
+ }
+}
+```
+
+### Types
+
+The types package includes all typed structures that client and server serialize to execute operations.
+
+### Server
+
+The server package includes API endpoints that applications compatible with the Docker engine API can reuse. It also provides useful middlewares and helpers to handle http requests.
+
+This package is still pending extraction from the Docker engine.
+
+## Developing
+
+engine-api requires some minimal libraries that you can download by running `make deps`.
+
+To run tests, use the command `make test`. We use build tags to isolate functions and structures that are only available for testing.
+
+To validate the sources, use the command `make validate`.
+
+## License
+
+engine-api is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_create.go b/vendor/github.com/docker/engine-api/client/checkpoint_create.go
new file mode 100644
index 000000000..23883cc06
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/checkpoint_create.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointCreate creates a checkpoint from the given container with the given name
+func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
+ resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_delete.go b/vendor/github.com/docker/engine-api/client/checkpoint_delete.go
new file mode 100644
index 000000000..a4e9ed0c0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/checkpoint_delete.go
@@ -0,0 +1,12 @@
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// CheckpointDelete deletes the checkpoint with the given name from the given container
+func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error {
+ resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_list.go b/vendor/github.com/docker/engine-api/client/checkpoint_list.go
new file mode 100644
index 000000000..ef5ec261b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/checkpoint_list.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointList returns the checkpoints of the given container in the docker host.
+func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) {
+ var checkpoints []types.Checkpoint
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil)
+ if err != nil {
+ return checkpoints, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&checkpoints)
+ ensureReaderClosed(resp)
+ return checkpoints, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go
new file mode 100644
index 000000000..f3ad2cf30
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client.go
@@ -0,0 +1,153 @@
+package client
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/engine-api/client/transport"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+// DefaultVersion is the version of the current stable API
+const DefaultVersion string = "1.23"
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // transport is the interface used to send requests; it implements transport.Client.
+ transport transport.Client
+ // version of the server to talk to.
+ version string
+ // custom http headers configured by users.
+ customHTTPHeaders map[string]string
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the url to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the tls certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func NewEnvClient() (*Client, error) {
+ var client *http.Client
+ if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+ options := tlsconfig.Options{
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+ InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+ }
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ }
+ }
+
+ host := os.Getenv("DOCKER_HOST")
+ if host == "" {
+ host = DefaultDockerHost
+ }
+
+ version := os.Getenv("DOCKER_API_VERSION")
+ if version == "" {
+ version = DefaultVersion
+ }
+
+ return NewClient(host, version, client, nil)
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+ proto, addr, basePath, err := ParseHost(host)
+ if err != nil {
+ return nil, err
+ }
+
+ transport, err := transport.NewTransportWithHTTP(proto, addr, client)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Client{
+ proto: proto,
+ addr: addr,
+ basePath: basePath,
+ transport: transport,
+ version: version,
+ customHTTPHeaders: httpHeaders,
+ }, nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+ var apiPath string
+ if cli.version != "" {
+ v := strings.TrimPrefix(cli.version, "v")
+ apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+ } else {
+ apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+ }
+
+ u := &url.URL{
+ Path: apiPath,
+ }
+ if len(query) > 0 {
+ u.RawQuery = query.Encode()
+ }
+ return u.String()
+}
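+
+// For example, with version "1.23" and an empty base path, a call such as
+// getAPIPath("/containers/json", nil) would yield "/v1.23/containers/json";
+// if no version is set the path is left unversioned.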
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+func (cli *Client) ClientVersion() string {
+ return cli.version
+}
+
+// UpdateClientVersion updates the version string associated with this
+// instance of the Client.
+func (cli *Client) UpdateClientVersion(v string) {
+ cli.version = v
+}
+
+// ParseHost verifies that the given host string is valid.
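+// For example, "unix:///var/run/docker.sock" would parse to
+// ("unix", "/var/run/docker.sock", "") and "tcp://1.2.3.4:2376" to
+// ("tcp", "1.2.3.4:2376", ""); a host without a "://" separator is rejected.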
+func ParseHost(host string) (string, string, string, error) {
+ protoAddrParts := strings.SplitN(host, "://", 2)
+ if len(protoAddrParts) == 1 {
+ return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ proto, addr := protoAddrParts[0], protoAddrParts[1]
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", "", "", err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return proto, addr, basePath, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/client_unix.go b/vendor/github.com/docker/engine-api/client/client_unix.go
new file mode 100644
index 000000000..89de892c8
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd darwin
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/engine-api/client/client_windows.go b/vendor/github.com/docker/engine-api/client/client_windows.go
new file mode 100644
index 000000000..07c0c7a77
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go
new file mode 100644
index 000000000..1b616bf03
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_attach.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+ query := url.Values{}
+ if options.Stream {
+ query.Set("stream", "1")
+ }
+ if options.Stdin {
+ query.Set("stdin", "1")
+ }
+ if options.Stdout {
+ query.Set("stdout", "1")
+ }
+ if options.Stderr {
+ query.Set("stderr", "1")
+ }
+ if options.DetachKeys != "" {
+ query.Set("detachKeys", options.DetachKeys)
+ }
+
+ headers := map[string][]string{"Content-Type": {"text/plain"}}
+ return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go
new file mode 100644
index 000000000..d5c474990
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_commit.go
@@ -0,0 +1,53 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ distreference "github.com/docker/distribution/reference"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/reference"
+ "golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes into a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
+ var repository, tag string
+ if options.Reference != "" {
+ distributionRef, err := distreference.ParseNamed(options.Reference)
+ if err != nil {
+ return types.ContainerCommitResponse{}, err
+ }
+
+ if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+ return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference")
+ }
+
+ tag = reference.GetTagFromNamedRef(distributionRef)
+ repository = distributionRef.Name()
+ }
+
+ query := url.Values{}
+ query.Set("container", container)
+ query.Set("repo", repository)
+ query.Set("tag", tag)
+ query.Set("comment", options.Comment)
+ query.Set("author", options.Author)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+ if !options.Pause {
+ query.Set("pause", "0")
+ }
+
+ var response types.ContainerCommitResponse
+ resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go
new file mode 100644
index 000000000..d3dd0b116
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_copy.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
+ response, err := cli.head(ctx, urlStr, query, nil)
+ if err != nil {
+ return types.ContainerPathStat{}, err
+ }
+ defer ensureReaderClosed(response)
+ return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ if !options.AllowOverwriteDirWithFile {
+ query.Set("noOverwriteDirNonDir", "true")
+ }
+
+ apiPath := fmt.Sprintf("/containers/%s/archive", container)
+
+ response, err := cli.putRaw(ctx, apiPath, query, content, nil)
+ if err != nil {
+ return err
+ }
+ defer ensureReaderClosed(response)
+
+ if response.statusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+ query := make(url.Values, 1)
+ query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+ apiPath := fmt.Sprintf("/containers/%s/archive", container)
+ response, err := cli.get(ctx, apiPath, query, nil)
+ if err != nil {
+ return nil, types.ContainerPathStat{}, err
+ }
+
+ if response.statusCode != http.StatusOK {
+ return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ // In order to get the copy behavior right, we need to know information
+ // about both the source and the destination. The response headers include
+ // stat info about the source that we can use in deciding exactly how to
+ // copy it locally. Along with the stat info about the local destination,
+ // we have everything we need to handle the multiple possibilities there
+ // can be when copying a file/dir from one location to another file/dir.
+ stat, err := getContainerPathStatFromHeader(response.header)
+ if err != nil {
+ return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+ }
+ return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+ var stat types.ContainerPathStat
+
+ encodedStat := header.Get("X-Docker-Container-Path-Stat")
+ statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+ err := json.NewDecoder(statDecoder).Decode(&stat)
+ if err != nil {
+ err = fmt.Errorf("unable to decode container path stat header: %s", err)
+ }
+
+ return stat, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_create.go b/vendor/github.com/docker/engine-api/client/container_create.go
new file mode 100644
index 000000000..98935794d
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_create.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+ "golang.org/x/net/context"
+)
+
+type configWrapper struct {
+ *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
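+// A minimal call might look like (image name and ctx are illustrative):
+//
+//   resp, err := cli.ContainerCreate(ctx,
+//       &container.Config{Image: "alpine"}, nil, nil, "")
+//   if err == nil {
+//       // resp.ID holds the new container's identifier
+//   }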
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
+ var response types.ContainerCreateResponse
+ query := url.Values{}
+ if containerName != "" {
+ query.Set("name", containerName)
+ }
+
+ body := configWrapper{
+ Config: config,
+ HostConfig: hostConfig,
+ NetworkingConfig: networkingConfig,
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+ if err != nil {
+ if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+ return response, imageNotFoundError{config.Image}
+ }
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_diff.go b/vendor/github.com/docker/engine-api/client/container_diff.go
new file mode 100644
index 000000000..f4bb3a46b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
+ var changes []types.ContainerChange
+
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+ if err != nil {
+ return changes, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&changes)
+ ensureReaderClosed(serverResp)
+ return changes, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go
new file mode 100644
index 000000000..ff7e1a9d0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_exec.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
+ var response types.ContainerExecCreateResponse
+ resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+ resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+ var response types.ContainerExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go
new file mode 100644
index 000000000..52194f3d3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return serverResp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_inspect.go b/vendor/github.com/docker/engine-api/client/container_inspect.go
new file mode 100644
index 000000000..0fa096d38
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_inspect.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, err
+ }
+
+ var response types.ContainerJSON
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ContainerJSON{}, nil, err
+ }
+
+ var response types.ContainerJSON
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_kill.go b/vendor/github.com/docker/engine-api/client/container_kill.go
new file mode 100644
index 000000000..29f80c73a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+ query := url.Values{}
+ query.Set("signal", signal)
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go
new file mode 100644
index 000000000..87f7333dc
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_list.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainerList returns the list of containers in the docker host.
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ query := url.Values{}
+
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ if options.Limit != -1 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Since != "" {
+ query.Set("since", options.Since)
+ }
+
+ if options.Before != "" {
+ query.Set("before", options.Before)
+ }
+
+ if options.Size {
+ query.Set("size", "1")
+ }
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/containers/json", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []types.Container
+ err = json.NewDecoder(resp.body).Decode(&containers)
+ ensureReaderClosed(resp)
+ return containers, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go
new file mode 100644
index 000000000..08b9b9187
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+ timetypes "github.com/docker/engine-api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
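
A hedged sketch of calling ContainerLogs; the helper name is illustrative, and the needed imports (io, os, the vendored client and types packages, golang.org/x/net/context) are assumed:

    // dumpLogs copies the last 100 log lines of a container to stdout.
    func dumpLogs(ctx context.Context, cli client.ContainerAPIClient, containerID string) error {
        rc, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Tail:       "100",
        })
        if err != nil {
            return err
        }
        defer rc.Close()

        // The stream interleaves stdout and stderr frames unless the container runs with a TTY.
        _, err = io.Copy(os.Stdout, rc)
        return err
    }
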
diff --git a/vendor/github.com/docker/engine-api/client/container_pause.go b/vendor/github.com/docker/engine-api/client/container_pause.go
new file mode 100644
index 000000000..412067a78
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go
new file mode 100644
index 000000000..cef4b8122
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_remove.go
@@ -0,0 +1,27 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+ query := url.Values{}
+ if options.RemoveVolumes {
+ query.Set("v", "1")
+ }
+ if options.RemoveLinks {
+ query.Set("link", "1")
+ }
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_rename.go b/vendor/github.com/docker/engine-api/client/container_rename.go
new file mode 100644
index 000000000..0e718da7c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+ query := url.Values{}
+ query.Set("name", newContainerName)
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go
new file mode 100644
index 000000000..b95d26b33
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
+ query := url.Values{}
+ query.Set("h", strconv.Itoa(height))
+ query.Set("w", strconv.Itoa(width))
+
+ resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_restart.go b/vendor/github.com/docker/engine-api/client/container_restart.go
new file mode 100644
index 000000000..93c042d08
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/engine-api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_start.go b/vendor/github.com/docker/engine-api/client/container_start.go
new file mode 100644
index 000000000..1e22eec64
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_start.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
+ query := url.Values{}
+ if len(options.CheckpointID) != 0 {
+ query.Set("checkpoint", options.CheckpointID)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_stats.go b/vendor/github.com/docker/engine-api/client/container_stats.go
new file mode 100644
index 000000000..2cc67c3af
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_stats.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
+ query := url.Values{}
+ query.Set("stream", "0")
+ if stream {
+ query.Set("stream", "1")
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_stop.go b/vendor/github.com/docker/engine-api/client/container_stop.go
new file mode 100644
index 000000000..1fc577f2b
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_stop.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/engine-api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerStop requests the daemon to stop a container.
+// The call blocks until the container stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_top.go b/vendor/github.com/docker/engine-api/client/container_top.go
new file mode 100644
index 000000000..5ad926ae0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
+ var response types.ContainerProcessList
+ query := url.Values{}
+ if len(arguments) > 0 {
+ query.Set("ps_args", strings.Join(arguments, " "))
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_unpause.go b/vendor/github.com/docker/engine-api/client/container_unpause.go
new file mode 100644
index 000000000..5c7621125
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_update.go b/vendor/github.com/docker/engine-api/client/container_update.go
new file mode 100644
index 000000000..a5a1826dc
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_update.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go
new file mode 100644
index 000000000..c26ff3f37
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/container_wait.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+)
+
+// ContainerWait pauses execution until a container exits.
+// It returns the container's exit status code reported by the API.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+ if err != nil {
+ return -1, err
+ }
+ defer ensureReaderClosed(resp)
+
+ var res types.ContainerWaitResponse
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ return -1, err
+ }
+
+ return res.StatusCode, nil
+}
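
ContainerStop and ContainerWait are commonly paired for a clean shutdown; a sketch (the helper name and the ten-second grace period are illustrative, imports assumed as above plus time):

    // stopAndWait asks the daemon to stop a container, then blocks until it exits
    // and returns the container's exit code.
    func stopAndWait(ctx context.Context, cli client.ContainerAPIClient, containerID string) (int, error) {
        grace := 10 * time.Second
        if err := cli.ContainerStop(ctx, containerID, &grace); err != nil {
            return -1, err
        }
        return cli.ContainerWait(ctx, containerID)
    }
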
diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go
new file mode 100644
index 000000000..e026320bb
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/errors.go
@@ -0,0 +1,203 @@
+package client
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
+var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+
+type notFound interface {
+ error
+ NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused by an
+// object (image, container, network, volume, …) not being found in the docker host.
+func IsErrNotFound(err error) bool {
+ te, ok := err.(notFound)
+ return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+ imageID string
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e imageNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+ containerID string
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e containerNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+ networkID string
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e networkNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+ volumeID string
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e volumeNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+ cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+ return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+func IsErrUnauthorized(err error) bool {
+ _, ok := err.(unauthorizedError)
+ return ok
+}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+ nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e nodeNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+ _, ok := err.(nodeNotFoundError)
+ return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+ serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e serviceNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+ _, ok := err.(serviceNotFoundError)
+ return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+ taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error is a NotFound error
+func (e taskNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+ _, ok := err.(taskNotFoundError)
+ return ok
+}
+
+type pluginPermissionDenied struct {
+ name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+ return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+ _, ok := err.(pluginPermissionDenied)
+ return ok
+}
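
The intended use of these helpers is to branch on the error kind rather than on error strings; a sketch (helper name illustrative, imports assumed as above):

    // inspectIfPresent treats "no such container" as an ordinary state instead of a failure.
    func inspectIfPresent(ctx context.Context, cli client.ContainerAPIClient, containerID string) (*types.ContainerJSON, error) {
        info, err := cli.ContainerInspect(ctx, containerID)
        if client.IsErrContainerNotFound(err) {
            return nil, nil
        }
        if err != nil {
            return nil, err
        }
        return &info, nil
    }
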
diff --git a/vendor/github.com/docker/engine-api/client/events.go b/vendor/github.com/docker/engine-api/client/events.go
new file mode 100644
index 000000000..f22a18e1d
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/events.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ timetypes "github.com/docker/engine-api/types/time"
+)
+
+// Events returns a stream of events in the daemon in a ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ ref := time.Now()
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("until", ts)
+ }
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ serverResponse, err := cli.get(ctx, "/events", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return serverResponse.body, nil
+}
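
A sketch of consuming the Events stream; the "type" filter key and the minimal message struct reflect the daemon's JSON event format and are assumptions, not something this file defines (imports: encoding/json, fmt, plus the filters package):

    // watchContainerEvents prints container events until the stream ends or ctx is cancelled.
    func watchContainerEvents(ctx context.Context, cli client.SystemAPIClient) error {
        f := filters.NewArgs()
        f.Add("type", "container")

        rc, err := cli.Events(ctx, types.EventsOptions{Filters: f})
        if err != nil {
            return err
        }
        defer rc.Close()

        dec := json.NewDecoder(rc)
        for {
            // Each event is a newline-delimited JSON object; decode only the fields we print.
            var ev struct {
                Type   string `json:"Type"`
                Action string `json:"Action"`
            }
            if err := dec.Decode(&ev); err != nil {
                return err // io.EOF once the daemon closes the stream
            }
            fmt.Println(ev.Type, ev.Action)
        }
    }
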
diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go
new file mode 100644
index 000000000..dbd91ef62
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/hijack.go
@@ -0,0 +1,174 @@
+package client
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/go-connections/sockets"
+ "golang.org/x/net/context"
+)
+
+// tlsClientCon holds tls information and a dialed connection.
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if conn, ok := c.rawConn.(types.CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+ bodyEncoded, err := encodeData(body)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ req, err := cli.newRequest("POST", path, query, bodyEncoded, headers)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ req.Host = cli.addr
+
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "tcp")
+
+ conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig())
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+ return types.HijackedResponse{}, err
+ }
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prohibit
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ clientconn := httputil.NewClientConn(conn, nil)
+ defer clientconn.Close()
+
+ // Server hijacks the connection, error 'connection closed' expected
+ _, err = clientconn.Do(req)
+
+ rwc, br := clientconn.Hijack()
+
+ return types.HijackedResponse{Conn: rwc, Reader: br}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+ if err != nil {
+ return nil, err
+ }
+
+ rawConn, err := proxyDialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prohibit
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+// This is where Docker differs from the standard crypto/tls package: we return a
+// wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+ if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+ // Notice this isn't Go standard's tls.Dial function
+ return tlsDial(proto, addr, tlsConfig)
+ }
+ if proto == "npipe" {
+ return sockets.DialPipe(addr, 32*time.Second)
+ }
+ return net.Dial(proto, addr)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go
new file mode 100644
index 000000000..0ceb88cf6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_build.go
@@ -0,0 +1,119 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/container"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+ query, err := imageBuildOptionsToQuery(options)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ headers := http.Header(make(map[string][]string))
+ buf, err := json.Marshal(options.AuthConfigs)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+ headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+ headers.Set("Content-Type", "application/tar")
+
+ serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ osType := getDockerOS(serverResp.header.Get("Server"))
+
+ return types.ImageBuildResponse{
+ Body: serverResp.body,
+ OSType: osType,
+ }, nil
+}
+
+func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+ query := url.Values{
+ "t": options.Tags,
+ }
+ if options.SuppressOutput {
+ query.Set("q", "1")
+ }
+ if options.RemoteContext != "" {
+ query.Set("remote", options.RemoteContext)
+ }
+ if options.NoCache {
+ query.Set("nocache", "1")
+ }
+ if options.Remove {
+ query.Set("rm", "1")
+ } else {
+ query.Set("rm", "0")
+ }
+
+ if options.ForceRemove {
+ query.Set("forcerm", "1")
+ }
+
+ if options.PullParent {
+ query.Set("pull", "1")
+ }
+
+ if !container.Isolation.IsDefault(options.Isolation) {
+ query.Set("isolation", string(options.Isolation))
+ }
+
+ query.Set("cpusetcpus", options.CPUSetCPUs)
+ query.Set("cpusetmems", options.CPUSetMems)
+ query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+ query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+ query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+ query.Set("memory", strconv.FormatInt(options.Memory, 10))
+ query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+ query.Set("cgroupparent", options.CgroupParent)
+ query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+ query.Set("dockerfile", options.Dockerfile)
+
+ ulimitsJSON, err := json.Marshal(options.Ulimits)
+ if err != nil {
+ return query, err
+ }
+ query.Set("ulimits", string(ulimitsJSON))
+
+ buildArgsJSON, err := json.Marshal(options.BuildArgs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("buildargs", string(buildArgsJSON))
+
+ labelsJSON, err := json.Marshal(options.Labels)
+ if err != nil {
+ return query, err
+ }
+ query.Set("labels", string(labelsJSON))
+ return query, nil
+}
+
+func getDockerOS(serverHeader string) string {
+ var osType string
+ matches := headerRegexp.FindStringSubmatch(serverHeader)
+ if len(matches) > 0 {
+ osType = matches[1]
+ }
+ return osType
+}
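
Because ImageBuild posts the build context as a tar stream (Content-Type application/tar), the caller has to assemble one; a minimal in-memory sketch with placeholder Dockerfile contents and tag (imports: archive/tar, bytes, io, os):

    // buildHello builds a tiny image from an in-memory tar context and streams the
    // daemon's JSON progress messages to stdout.
    func buildHello(ctx context.Context, cli client.ImageAPIClient) error {
        dockerfile := []byte("FROM alpine:3.4\nCMD [\"echo\", \"hello\"]\n")

        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}); err != nil {
            return err
        }
        if _, err := tw.Write(dockerfile); err != nil {
            return err
        }
        if err := tw.Close(); err != nil {
            return err
        }

        resp, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{
            Tags:       []string{"example/hello:latest"},
            Dockerfile: "Dockerfile",
        })
        if err != nil {
            return err
        }
        defer resp.Body.Close()

        _, err = io.Copy(os.Stdout, resp.Body)
        return err
    }
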
diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go
new file mode 100644
index 000000000..6dfc0391c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/reference"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+ repository, tag, err := reference.Parse(parentReference)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", repository)
+ query.Set("tag", tag)
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_history.go b/vendor/github.com/docker/engine-api/client/image_history.go
new file mode 100644
index 000000000..b2840b5ed
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
+ var history []types.ImageHistory
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+ if err != nil {
+ return history, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&history)
+ ensureReaderClosed(serverResp)
+ return history, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go
new file mode 100644
index 000000000..4e8749a01
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/engine-api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+ if ref != "" {
+ //Check if the given image name can be resolved
+ if _, err := reference.ParseNamed(ref); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{}
+ query.Set("fromSrc", source.SourceName)
+ query.Set("repo", ref)
+ query.Set("tag", options.Tag)
+ query.Set("message", options.Message)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go
new file mode 100644
index 000000000..859ba6408
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) {
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ImageInspect{}, nil, imageNotFoundError{imageID}
+ }
+ return types.ImageInspect{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ImageInspect{}, nil, err
+ }
+
+ var response types.ImageInspect
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_list.go b/vendor/github.com/docker/engine-api/client/image_list.go
new file mode 100644
index 000000000..740825823
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_list.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ImageList returns a list of images in the docker host.
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) {
+ var images []types.Image
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return images, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ if options.MatchName != "" {
+ // FIXME rename this parameter, to not be confused with the filters flag
+ query.Set("filter", options.MatchName)
+ }
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ serverResp, err := cli.get(ctx, "/images/json", query, nil)
+ if err != nil {
+ return images, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&images)
+ ensureReaderClosed(serverResp)
+ return images, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go
new file mode 100644
index 000000000..72f55fdc0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+)
+
+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// ImageLoadResponse returned by this function.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
+ v := url.Values{}
+ v.Set("quiet", "0")
+ if quiet {
+ v.Set("quiet", "1")
+ }
+ headers := map[string][]string{"Content-Type": {"application/x-tar"}}
+ resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
+ if err != nil {
+ return types.ImageLoadResponse{}, err
+ }
+ return types.ImageLoadResponse{
+ Body: resp.body,
+ JSON: resp.header.Get("Content-Type") == "application/json",
+ }, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go
new file mode 100644
index 000000000..e2c49ec52
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_pull.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/reference"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// If the operation is unauthorized, it executes the privilege function
+// (options.PrivilegeFunc) and retries the request once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {
+ repository, tag, err := reference.Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", repository)
+ if tag != "" && !options.All {
+ query.Set("tag", tag)
+ }
+
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
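
A usage sketch for ImagePull; the pull only completes once the returned progress stream has been read to EOF, so the caller must drain it (imports: io, io/ioutil):

    // pullImage pulls an image such as "alpine:latest" and waits for the pull to finish.
    func pullImage(ctx context.Context, cli client.ImageAPIClient, ref string) error {
        rc, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})
        if err != nil {
            return err
        }
        defer rc.Close()

        // Drain the JSON progress messages; the image is present once this hits EOF.
        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }
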
diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go
new file mode 100644
index 000000000..89191ee30
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_push.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ distreference "github.com/docker/distribution/reference"
+ "github.com/docker/engine-api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// If the operation is unauthorized, it executes the privilege function
+// (options.PrivilegeFunc) and retries the request once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {
+ distributionRef, err := distreference.ParseNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+ return nil, errors.New("cannot push a digest reference")
+ }
+
+ var tag = ""
+ if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged {
+ tag = nameTaggedRef.Tag()
+ }
+
+ query := url.Values{}
+ query.Set("tag", tag)
+
+ resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go
new file mode 100644
index 000000000..47224326e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
+ query := url.Values{}
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+ if !options.PruneChildren {
+ query.Set("noprune", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var dels []types.ImageDelete
+ err = json.NewDecoder(resp.body).Decode(&dels)
+ ensureReaderClosed(resp)
+ return dels, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go
new file mode 100644
index 000000000..ecac880a3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+ query := url.Values{
+ "names": imageIDs,
+ }
+
+ resp, err := cli.get(ctx, "/images/get", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go
new file mode 100644
index 000000000..3940dfd79
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_search.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search for a term in a remote registry.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
+ var results []registry.SearchResult
+ query := url.Values{}
+ query.Set("term", term)
+ query.Set("limit", fmt.Sprintf("%d", options.Limit))
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return results, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return results, privilegeErr
+ }
+ resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return results, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&results)
+ ensureReaderClosed(resp)
+ return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/images/search", query, headers)
+}
diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go
new file mode 100644
index 000000000..718291367
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/image_tag.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ distreference "github.com/docker/distribution/reference"
+ "github.com/docker/engine-api/types/reference"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error {
+ distributionRef, err := distreference.ParseNamed(ref)
+ if err != nil {
+ return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref)
+ }
+
+ if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+ return errors.New("refusing to create a tag with a digest reference")
+ }
+
+ tag := reference.GetTagFromNamedRef(distributionRef)
+
+ query := url.Values{}
+ query.Set("repo", distributionRef.Name())
+ query.Set("tag", tag)
+
+ resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/info.go b/vendor/github.com/docker/engine-api/client/info.go
new file mode 100644
index 000000000..ff0958d65
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/info.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (types.Info, error) {
+ var info types.Info
+ serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
+ if err != nil {
+ return info, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
+ return info, fmt.Errorf("Error reading remote info: %v", err)
+ }
+
+ return info, nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go
new file mode 100644
index 000000000..1cadaef56
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/interface.go
@@ -0,0 +1,135 @@
+package client
+
+import (
+ "io"
+ "time"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/network"
+ "github.com/docker/engine-api/types/registry"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
+type CommonAPIClient interface {
+ ContainerAPIClient
+ ImageAPIClient
+ NodeAPIClient
+ NetworkAPIClient
+ ServiceAPIClient
+ SwarmAPIClient
+ SystemAPIClient
+ VolumeAPIClient
+ ClientVersion() string
+ ServerVersion(ctx context.Context) (types.Version, error)
+ UpdateClientVersion(v string)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
+ ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
+ ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
+ ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
+ ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error)
+ ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+ ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error)
+ ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
+ ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
+ ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
+ ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
+ ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
+ ContainerKill(ctx context.Context, container, signal string) error
+ ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ ContainerPause(ctx context.Context, container string) error
+ ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
+ ContainerRename(ctx context.Context, container, newContainerName string) error
+ ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
+ ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
+ ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error)
+ ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
+ ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error)
+ ContainerUnpause(ctx context.Context, container string) error
+ ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error
+ ContainerWait(ctx context.Context, container string) (int, error)
+ CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+ CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
+ ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+ ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
+ ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error)
+ ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
+ ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error)
+ ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error)
+ ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
+ ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
+ ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
+ ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error)
+ ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
+ ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
+ ImageTag(ctx context.Context, image, ref string) error
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
+ NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
+ NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
+ NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
+ NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error)
+ NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error)
+ NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
+ NetworkRemove(ctx context.Context, networkID string) error
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+ NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+ Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
+ Info(ctx context.Context) (types.Info, error)
+ RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
+ VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error)
+ VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
+ VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
+ VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error)
+ VolumeRemove(ctx context.Context, volumeID string) error
+}
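
These narrow interfaces are what calling code can depend on instead of the concrete *Client, which keeps daemon interactions mockable in tests; a sketch with illustrative names (imports assumed as above plus time):

    // restarter depends only on ContainerAPIClient, so unit tests can substitute
    // a lightweight fake instead of a real daemon connection.
    type restarter struct {
        docker client.ContainerAPIClient
    }

    // restartWithGrace restarts a container, giving it up to grace to stop cleanly.
    func (r *restarter) restartWithGrace(ctx context.Context, containerID string, grace time.Duration) error {
        return r.docker.ContainerRestart(ctx, containerID, &grace)
    }
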
diff --git a/vendor/github.com/docker/engine-api/client/interface_experimental.go b/vendor/github.com/docker/engine-api/client/interface_experimental.go
new file mode 100644
index 000000000..eb0cd7bf1
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/interface_experimental.go
@@ -0,0 +1,37 @@
+// +build experimental
+
+package client
+
+import (
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// APIClient is an interface that clients that talk with a docker server must implement.
+type APIClient interface {
+ CommonAPIClient
+ CheckpointAPIClient
+ PluginAPIClient
+}
+
+// CheckpointAPIClient defines API client methods for the checkpoints
+type CheckpointAPIClient interface {
+ CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
+ CheckpointDelete(ctx context.Context, container string, checkpointID string) error
+ CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
+ PluginList(ctx context.Context) (types.PluginsListResponse, error)
+ PluginRemove(ctx context.Context, name string) error
+ PluginEnable(ctx context.Context, name string) error
+ PluginDisable(ctx context.Context, name string) error
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
+ PluginPush(ctx context.Context, name string, registryAuth string) error
+ PluginSet(ctx context.Context, name string, args []string) error
+ PluginInspect(ctx context.Context, name string) (*types.Plugin, error)
+}
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/engine-api/client/interface_stable.go b/vendor/github.com/docker/engine-api/client/interface_stable.go
new file mode 100644
index 000000000..496f522d5
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/interface_stable.go
@@ -0,0 +1,11 @@
+// +build !experimental
+
+package client
+
+// APIClient is an interface that clients that talk with a docker server must implement.
+type APIClient interface {
+ CommonAPIClient
+}
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/engine-api/client/login.go b/vendor/github.com/docker/engine-api/client/login.go
new file mode 100644
index 000000000..482f94789
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/login.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns an unauthorizedError when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
+ resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+
+ if resp != nil && resp.statusCode == http.StatusUnauthorized {
+ return types.AuthResponse{}, unauthorizedError{err}
+ }
+ if err != nil {
+ return types.AuthResponse{}, err
+ }
+
+ var response types.AuthResponse
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/network_connect.go b/vendor/github.com/docker/engine-api/client/network_connect.go
new file mode 100644
index 000000000..9a402a3e6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_connect.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/network"
+ "golang.org/x/net/context"
+)
+
+// NetworkConnect connects a container to an existing network in the docker host.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+ nc := types.NetworkConnect{
+ Container: containerID,
+ EndpointConfig: config,
+ }
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go
new file mode 100644
index 000000000..c9c0b9fde
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+ networkCreateRequest := types.NetworkCreateRequest{
+ NetworkCreate: options,
+ Name: name,
+ }
+ var response types.NetworkCreateResponse
+ serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+ if err != nil {
+ return response, err
+ }
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect.go b/vendor/github.com/docker/engine-api/client/network_disconnect.go
new file mode 100644
index 000000000..a3e33672f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existing network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+ nd := types.NetworkDisconnect{Container: containerID, Force: force}
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/network_inspect.go b/vendor/github.com/docker/engine-api/client/network_inspect.go
new file mode 100644
index 000000000..e22fcd671
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) {
+ networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID)
+ return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) {
+ var networkResource types.NetworkResource
+ resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return networkResource, nil, networkNotFoundError{networkID}
+ }
+ return networkResource, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return networkResource, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&networkResource)
+ return networkResource, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/network_list.go b/vendor/github.com/docker/engine-api/client/network_list.go
new file mode 100644
index 000000000..056955249
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_list.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
+ query := url.Values{}
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+ var networkResources []types.NetworkResource
+ resp, err := cli.get(ctx, "/networks", query, nil)
+ if err != nil {
+ return networkResources, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&networkResources)
+ ensureReaderClosed(resp)
+ return networkResources, err
+}
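A short sketch of driving NetworkList with a server-side filter; the "driver" filter key and the NewEnvClient constructor are assumptions for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Ask the daemon to return only networks using the bridge driver.
	args := filters.NewArgs()
	args.Add("driver", "bridge")

	networks, err := cli.NetworkList(context.Background(), types.NetworkListOptions{Filters: args})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range networks {
		fmt.Println(n.ID, n.Name)
	}
}
```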
diff --git a/vendor/github.com/docker/engine-api/client/network_remove.go b/vendor/github.com/docker/engine-api/client/network_remove.go
new file mode 100644
index 000000000..6bd674892
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/network_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NetworkRemove removes an existing network from the docker host.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+ resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/node_inspect.go b/vendor/github.com/docker/engine-api/client/node_inspect.go
new file mode 100644
index 000000000..5f555bb35
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/node_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeInspectWithRaw returns the node information.
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Node{}, nil, nodeNotFoundError{nodeID}
+ }
+ return swarm.Node{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ var response swarm.Node
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/node_list.go b/vendor/github.com/docker/engine-api/client/node_list.go
new file mode 100644
index 000000000..57cf14827
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/node_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.body).Decode(&nodes)
+ ensureReaderClosed(resp)
+ return nodes, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/node_remove.go b/vendor/github.com/docker/engine-api/client/node_remove.go
new file mode 100644
index 000000000..a22ee93f4
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/node_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NodeRemove removes a Node.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string) error {
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/node_update.go b/vendor/github.com/docker/engine-api/client/node_update.go
new file mode 100644
index 000000000..472221151
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/node_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_disable.go b/vendor/github.com/docker/engine-api/client/plugin_disable.go
new file mode 100644
index 000000000..893fc6e82
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_disable.go
@@ -0,0 +1,14 @@
+// +build experimental
+
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginDisable disables a plugin
+func (cli *Client) PluginDisable(ctx context.Context, name string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/disable", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_enable.go b/vendor/github.com/docker/engine-api/client/plugin_enable.go
new file mode 100644
index 000000000..84422abc7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_enable.go
@@ -0,0 +1,14 @@
+// +build experimental
+
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginEnable enables a plugin
+func (cli *Client) PluginEnable(ctx context.Context, name string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/enable", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_inspect.go b/vendor/github.com/docker/engine-api/client/plugin_inspect.go
new file mode 100644
index 000000000..b4bcc2006
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_inspect.go
@@ -0,0 +1,22 @@
+// +build experimental
+
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginInspect inspects an existing plugin
+func (cli *Client) PluginInspect(ctx context.Context, name string) (*types.Plugin, error) {
+ var p types.Plugin
+ resp, err := cli.get(ctx, "/plugins/"+name, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&p)
+ ensureReaderClosed(resp)
+ return &p, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_install.go b/vendor/github.com/docker/engine-api/client/plugin_install.go
new file mode 100644
index 000000000..3f5e59ff5
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_install.go
@@ -0,0 +1,59 @@
+// +build experimental
+
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginInstall installs a plugin
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error {
+ // FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
+ query := url.Values{}
+ query.Set("name", name)
+ resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ ensureReaderClosed(resp)
+ return privilegeErr
+ }
+ resp, err = cli.tryPluginPull(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ ensureReaderClosed(resp)
+ return err
+ }
+ var privileges types.PluginPrivileges
+ if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
+ ensureReaderClosed(resp)
+ return err
+ }
+ ensureReaderClosed(resp)
+
+ if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+ accept, err := options.AcceptPermissionsFunc(privileges)
+ if err != nil {
+ return err
+ }
+ if !accept {
+ resp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(resp)
+ return pluginPermissionDenied{name}
+ }
+ }
+ if options.Disabled {
+ return nil
+ }
+ return cli.PluginEnable(ctx, name)
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/pull", query, nil, headers)
+}
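Since the install flow above chains a pull, an optional re-authentication and a privilege prompt, a hedged sketch of driving it from application code may help. It requires the experimental build tag, and the plugin reference, prompt logic and option field types are inferred from the code above rather than taken from this change:

```go
// +build experimental

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	opts := types.PluginInstallOptions{
		RegistryAuth: "", // base64-encoded auth, if the registry requires it
		// Called when the first pull returns 401; must return a fresh auth header.
		PrivilegeFunc: func() (string, error) {
			return "", fmt.Errorf("no credentials available")
		},
		// Called with the privileges the plugin requests; returning false aborts
		// the install and removes the partially pulled plugin.
		AcceptPermissionsFunc: func(privileges types.PluginPrivileges) (bool, error) {
			fmt.Printf("plugin requests: %+v\n", privileges)
			return true, nil
		},
	}

	// "example/plugin:latest" is a placeholder reference.
	if err := cli.PluginInstall(context.Background(), "example/plugin:latest", opts); err != nil {
		log.Fatal(err)
	}
}
```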
diff --git a/vendor/github.com/docker/engine-api/client/plugin_list.go b/vendor/github.com/docker/engine-api/client/plugin_list.go
new file mode 100644
index 000000000..7f2e2f21f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_list.go
@@ -0,0 +1,23 @@
+// +build experimental
+
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginList returns the installed plugins
+func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) {
+ var plugins types.PluginsListResponse
+ resp, err := cli.get(ctx, "/plugins", nil, nil)
+ if err != nil {
+ return plugins, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&plugins)
+ ensureReaderClosed(resp)
+ return plugins, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_push.go b/vendor/github.com/docker/engine-api/client/plugin_push.go
new file mode 100644
index 000000000..3afea5ed7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_push.go
@@ -0,0 +1,15 @@
+// +build experimental
+
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginPush pushes a plugin to a registry
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_remove.go b/vendor/github.com/docker/engine-api/client/plugin_remove.go
new file mode 100644
index 000000000..baf666556
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_remove.go
@@ -0,0 +1,14 @@
+// +build experimental
+
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string) error {
+ resp, err := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_set.go b/vendor/github.com/docker/engine-api/client/plugin_set.go
new file mode 100644
index 000000000..fb40f38b2
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/plugin_set.go
@@ -0,0 +1,14 @@
+// +build experimental
+
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go
new file mode 100644
index 000000000..854901559
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/request.go
@@ -0,0 +1,207 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/docker/engine-api/client/transport/cancellable"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+ body io.ReadCloser
+ header http.Header
+ statusCode int
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendRequest(ctx, "POST", path, query, obj, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT with a raw request body.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+ return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+ var body io.Reader
+
+ if obj != nil {
+ var err error
+ body, err = encodeData(obj)
+ if err != nil {
+ return nil, err
+ }
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ }
+
+ return cli.sendClientRequest(ctx, method, path, query, body, headers)
+}
+
+func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+ serverResp := &serverResponse{
+ body: nil,
+ statusCode: -1,
+ }
+
+ expectedPayload := (method == "POST" || method == "PUT")
+ if expectedPayload && body == nil {
+ body = bytes.NewReader([]byte{})
+ }
+
+ req, err := cli.newRequest(method, path, query, body, headers)
+ if err != nil {
+ return serverResp, err
+ }
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // For local communications, it doesn't matter what the host is. We just
+ // need a valid and meaningful host name. (See #189)
+ req.Host = "docker"
+ }
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.transport.Scheme()
+
+ if expectedPayload && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+
+ resp, err := cancellable.Do(ctx, cli.transport, req)
+ if err != nil {
+ if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+ return serverResp, ErrConnectionFailed
+ }
+
+ if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") {
+ return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+ }
+
+ if cli.transport.Secure() && strings.Contains(err.Error(), "bad certificate") {
+ return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
+ }
+
+ return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
+ }
+
+ if resp != nil {
+ serverResp.statusCode = resp.StatusCode
+ }
+
+ if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return serverResp, err
+ }
+ if len(body) == 0 {
+ return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
+ }
+
+ var errorMessage string
+ if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) &&
+ resp.Header.Get("Content-Type") == "application/json" {
+ var errorResponse types.ErrorResponse
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return serverResp, fmt.Errorf("Error reading JSON: %v", err)
+ }
+ errorMessage = errorResponse.Message
+ } else {
+ errorMessage = string(body)
+ }
+
+ return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
+ }
+
+ serverResp.body = resp.Body
+ serverResp.header = resp.Header
+ return serverResp, nil
+}
+
+func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) {
+ apiPath := cli.getAPIPath(path, query)
+ req, err := http.NewRequest(method, apiPath, body)
+ if err != nil {
+ return nil, err
+ }
+
+	// Add the CLI config's HTTP headers BEFORE the Docker headers so that
+	// the user's custom headers cannot override the ones set by the client.
+ for k, v := range cli.customHTTPHeaders {
+ req.Header.Set(k, v)
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+
+ return req, nil
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
+
+func ensureReaderClosed(response *serverResponse) {
+ if response != nil && response.body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ io.CopyN(ioutil.Discard, response.body, 512)
+ response.body.Close()
+ }
+}
+
+func isTimeout(err error) bool {
+ type timeout interface {
+ Timeout() bool
+ }
+ e := err
+ switch urlErr := err.(type) {
+ case *url.Error:
+ e = urlErr.Err
+ }
+ t, ok := e.(timeout)
+ return ok && t.Timeout()
+}
diff --git a/vendor/github.com/docker/engine-api/client/service_create.go b/vendor/github.com/docker/engine-api/client/service_create.go
new file mode 100644
index 000000000..7349a984e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/service_create.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
+ var headers map[string][]string
+
+ if options.EncodedRegistryAuth != "" {
+ headers = map[string][]string{
+ "X-Registry-Auth": []string{options.EncodedRegistryAuth},
+ }
+ }
+
+ var response types.ServiceCreateResponse
+ resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/service_inspect.go b/vendor/github.com/docker/engine-api/client/service_inspect.go
new file mode 100644
index 000000000..958cd662e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/service_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) {
+ serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Service{}, nil, serviceNotFoundError{serviceID}
+ }
+ return swarm.Service{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ var response swarm.Service
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/service_list.go b/vendor/github.com/docker/engine-api/client/service_list.go
new file mode 100644
index 000000000..b48964aa0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/service_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.body).Decode(&services)
+ ensureReaderClosed(resp)
+ return services, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/service_remove.go b/vendor/github.com/docker/engine-api/client/service_remove.go
new file mode 100644
index 000000000..a9331f92c
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/service_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+ resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/service_update.go b/vendor/github.com/docker/engine-api/client/service_update.go
new file mode 100644
index 000000000..ee8b46126
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/service_update.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error {
+ var (
+ headers map[string][]string
+ query = url.Values{}
+ )
+
+ if options.EncodedRegistryAuth != "" {
+ headers = map[string][]string{
+ "X-Registry-Auth": []string{options.EncodedRegistryAuth},
+ }
+ }
+
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+ ensureReaderClosed(resp)
+ return err
+}
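The update call takes the current object version, so the usual pattern is inspect, mutate the spec, then update. A sketch, assuming swarm.Service exposes its Version and Spec as the vendored swarm types do, with a placeholder service name:

```go
package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// "my-service" is a placeholder service ID or name.
	service, _, err := cli.ServiceInspectWithRaw(ctx, "my-service")
	if err != nil {
		log.Fatal(err)
	}

	// Mutate the spec as needed, then send it back together with the version
	// read during inspect so the daemon can reject concurrent writes.
	spec := service.Spec
	if err := cli.ServiceUpdate(ctx, "my-service", service.Version, spec, types.ServiceUpdateOptions{}); err != nil {
		log.Fatal(err)
	}
}
```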
diff --git a/vendor/github.com/docker/engine-api/client/swarm_init.go b/vendor/github.com/docker/engine-api/client/swarm_init.go
new file mode 100644
index 000000000..68f0a744a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInit initializes the Swarm.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+ if err != nil {
+ return "", err
+ }
+
+ var response string
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_inspect.go b/vendor/github.com/docker/engine-api/client/swarm_inspect.go
new file mode 100644
index 000000000..d67c7c010
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInspect inspects the Swarm.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ serverResp, err := cli.get(ctx, "/swarm", nil, nil)
+ if err != nil {
+ return swarm.Swarm{}, err
+ }
+
+ var response swarm.Swarm
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_join.go b/vendor/github.com/docker/engine-api/client/swarm_join.go
new file mode 100644
index 000000000..a9b14e0c4
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/swarm_join.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmJoin joins the Swarm.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_leave.go b/vendor/github.com/docker/engine-api/client/swarm_leave.go
new file mode 100644
index 000000000..a4df73217
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the Swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_update.go b/vendor/github.com/docker/engine-api/client/swarm_update.go
new file mode 100644
index 000000000..568474af7
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/swarm_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the Swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/engine-api/client/task_inspect.go b/vendor/github.com/docker/engine-api/client/task_inspect.go
new file mode 100644
index 000000000..3cac8882e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Task{}, nil, taskNotFoundError{taskID}
+ }
+ return swarm.Task{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/task_list.go b/vendor/github.com/docker/engine-api/client/task_list.go
new file mode 100644
index 000000000..4604513ca
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+ query := url.Values{}
+
+ if options.Filter.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filter)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/tasks", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var tasks []swarm.Task
+ err = json.NewDecoder(resp.body).Decode(&tasks)
+ ensureReaderClosed(resp)
+ return tasks, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go
new file mode 100644
index 000000000..11dff6002
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package cancellable
+
+import (
+ "net/http"
+
+ "github.com/docker/engine-api/client/transport"
+)
+
+func canceler(client transport.Sender, req *http.Request) func() {
+ // TODO(djd): Respect any existing value of req.Cancel.
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
new file mode 100644
index 000000000..8ff2845c2
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package cancellable
+
+import (
+ "net/http"
+
+ "github.com/docker/engine-api/client/transport"
+)
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+func canceler(client transport.Sender, req *http.Request) func() {
+ rc, ok := client.(requestCanceler)
+ if !ok {
+ return func() {}
+ }
+ return func() {
+ rc.CancelRequest(req)
+ }
+}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
new file mode 100644
index 000000000..139414957
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
@@ -0,0 +1,113 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cancellable provides helper functions to cancel http requests.
+package cancellable
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/docker/engine-api/client/transport"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+//
+// FORK INFORMATION:
+//
+// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
+// taking a Sender interface rather than a *http.Client directly. That allows us to use
+// this function with mocked clients and hijacked connections.
+func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
+ cancel := canceler(client, req)
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ cancel()
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil && r.resp.Body != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ cancel()
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
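A small sketch of the cancellation behaviour documented in Do above; passing a nil Sender falls back to http.DefaultClient, and the URL is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/engine-api/client/transport/cancellable"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "http://example.com/", nil) // placeholder URL
	if err != nil {
		panic(err)
	}

	// If the context expires before the response arrives, Do cancels the
	// in-flight request and returns ctx.Err() instead of a response.
	resp, err := cancellable.Do(ctx, nil, req)
	if err != nil {
		fmt.Println("request failed or was cancelled:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```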
diff --git a/vendor/github.com/docker/engine-api/client/transport/client.go b/vendor/github.com/docker/engine-api/client/transport/client.go
new file mode 100644
index 000000000..13d4b3ab3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/client.go
@@ -0,0 +1,47 @@
+package transport
+
+import (
+ "crypto/tls"
+ "net/http"
+)
+
+// Sender is an interface that clients must implement
+// to be able to send requests to a remote connection.
+type Sender interface {
+ // Do sends request to a remote endpoint.
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Client is an interface that abstracts all remote connections.
+type Client interface {
+ Sender
+ // Secure tells whether the connection is secure or not.
+ Secure() bool
+ // Scheme returns the connection protocol the client uses.
+ Scheme() string
+ // TLSConfig returns any TLS configuration the client uses.
+ TLSConfig() *tls.Config
+}
+
+// tlsInfo holds information about the TLS configuration.
+type tlsInfo struct {
+ tlsConfig *tls.Config
+}
+
+// TLSConfig returns the TLS configuration.
+func (t *tlsInfo) TLSConfig() *tls.Config {
+ return t.tlsConfig
+}
+
+// Scheme returns protocol scheme to use.
+func (t *tlsInfo) Scheme() string {
+ if t.tlsConfig != nil {
+ return "https"
+ }
+ return "http"
+}
+
+// Secure returns true if there is a TLS configuration.
+func (t *tlsInfo) Secure() bool {
+ return t.tlsConfig != nil
+}
diff --git a/vendor/github.com/docker/engine-api/client/transport/transport.go b/vendor/github.com/docker/engine-api/client/transport/transport.go
new file mode 100644
index 000000000..ff28af185
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/transport/transport.go
@@ -0,0 +1,57 @@
+// Package transport provides functions to send requests to remote endpoints.
+package transport
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/docker/go-connections/sockets"
+)
+
+// apiTransport holds information about the http transport to connect with the API.
+type apiTransport struct {
+ *http.Client
+ *tlsInfo
+ transport *http.Transport
+}
+
+// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
+// It uses Docker's default http transport configuration if the client is nil.
+// It does not modify the client's transport if it's not nil.
+func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) {
+ var transport *http.Transport
+
+ if client != nil {
+ tr, ok := client.Transport.(*http.Transport)
+ if !ok {
+ return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+ }
+ transport = tr
+ } else {
+ transport = defaultTransport(proto, addr)
+ client = &http.Client{
+ Transport: transport,
+ }
+ }
+
+ return &apiTransport{
+ Client: client,
+ tlsInfo: &tlsInfo{transport.TLSClientConfig},
+ transport: transport,
+ }, nil
+}
+
+// CancelRequest stops a request execution.
+func (a *apiTransport) CancelRequest(req *http.Request) {
+ a.transport.CancelRequest(req)
+}
+
+// defaultTransport creates a new http.Transport with Docker's
+// default transport configuration.
+func defaultTransport(proto, addr string) *http.Transport {
+ tr := new(http.Transport)
+ sockets.ConfigureTransport(tr, proto, addr)
+ return tr
+}
+
+var _ Client = &apiTransport{}
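A brief sketch of building a transport directly with NewTransportWithHTTP; the unix socket path is the conventional default and only an assumption here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client/transport"
)

func main() {
	// Passing a nil *http.Client makes the package build its own default
	// transport for the given protocol and address.
	t, err := transport.NewTransportWithHTTP("unix", "/var/run/docker.sock", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scheme:", t.Scheme()) // "http" when no TLS config is set
	fmt.Println("secure:", t.Secure()) // false when no TLS config is set
}
```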
diff --git a/vendor/github.com/docker/engine-api/client/version.go b/vendor/github.com/docker/engine-api/client/version.go
new file mode 100644
index 000000000..e037551a2
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/version.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// ServerVersion returns version information about the docker server.
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
+ resp, err := cli.get(ctx, "/version", nil, nil)
+ if err != nil {
+ return types.Version{}, err
+ }
+
+ var server types.Version
+ err = json.NewDecoder(resp.body).Decode(&server)
+ ensureReaderClosed(resp)
+ return server, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/volume_create.go b/vendor/github.com/docker/engine-api/client/volume_create.go
new file mode 100644
index 000000000..cc1e1c177
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/volume_create.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// VolumeCreate creates a volume in the docker host.
+func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) {
+ var volume types.Volume
+ resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
+ if err != nil {
+ return volume, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&volume)
+ ensureReaderClosed(resp)
+ return volume, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect.go b/vendor/github.com/docker/engine-api/client/volume_inspect.go
new file mode 100644
index 000000000..2eaebfafa
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/volume_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types"
+ "golang.org/x/net/context"
+)
+
+// VolumeInspect returns the information about a specific volume in the docker host.
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
+ volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
+ return volume, err
+}
+
+// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
+func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+ var volume types.Volume
+ resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return volume, nil, volumeNotFoundError{volumeID}
+ }
+ return volume, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return volume, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&volume)
+ return volume, body, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/volume_list.go b/vendor/github.com/docker/engine-api/client/volume_list.go
new file mode 100644
index 000000000..7c6ccf834
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/volume_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// VolumeList returns the volumes configured in the docker host.
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) {
+ var volumes types.VolumesListResponse
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return volumes, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/volumes", query, nil)
+ if err != nil {
+ return volumes, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&volumes)
+ ensureReaderClosed(resp)
+ return volumes, err
+}
diff --git a/vendor/github.com/docker/engine-api/client/volume_remove.go b/vendor/github.com/docker/engine-api/client/volume_remove.go
new file mode 100644
index 000000000..0dce24c79
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/client/volume_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// VolumeRemove removes a volume from the docker host.
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error {
+ resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
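To round off the volume endpoints, a sketch of a create/inspect/remove round trip; the VolumeCreateRequest and Volume field names are assumed from the engine-api types, and the volume name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Assumed fields: Name and Driver; "local" is the stock volume driver.
	vol, err := cli.VolumeCreate(ctx, types.VolumeCreateRequest{Name: "scratch", Driver: "local"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", vol.Name, "at", vol.Mountpoint)

	if _, err := cli.VolumeInspect(ctx, vol.Name); err != nil {
		log.Fatal(err)
	}
	if err := cli.VolumeRemove(ctx, vol.Name); err != nil {
		log.Fatal(err)
	}
}
```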
diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go
new file mode 100644
index 000000000..056af6b84
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/auth.go
@@ -0,0 +1,22 @@
+package types
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
new file mode 100644
index 000000000..931ae10ab
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev
+
+import "fmt"
+
+// WeightDevice is a structure that holds device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go
new file mode 100644
index 000000000..def3f0619
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/client.go
@@ -0,0 +1,286 @@
+package types
+
+import (
+ "bufio"
+ "io"
+ "net"
+
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+ CheckpointID string
+ Exit bool
+}
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+ ExecID string
+ ContainerID string
+ Running bool
+ ExitCode int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+ Quiet bool
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filter filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+ CheckpointID string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+}
+
+// EventsOptions hold parameters to filter events with.
+type EventsOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+ Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// CloseWriter is an interface implemented by connections that can
+// close their write side, preventing any further writes.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*units.Ulimit
+ BuildArgs map[string]string
+ AuthConfigs map[string]AuthConfig
+ Context io.Reader
+ Labels map[string]string
+}
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
+ SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+ MatchName string
+ All bool
+ Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base 64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
+
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+ RegistryAuth string
+ PrivilegeFunc RequestPrivilegeFunc
+ Filters filters.Args
+ Limit int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct {
+ Height int
+ Width int
+}
+
+// VersionResponse holds version information for the client and the server
+type VersionResponse struct {
+ Client *Version
+ Server *Version
+}
+
+// ServerOK returns true when the client could connect to the docker server
+// and parse the information received. It returns false otherwise.
+func (v VersionResponse) ServerOK() bool {
+ return v.Server != nil
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filter filters.Args
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+ // ID is the ID of the created service.
+ ID string
+}
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filter filters.Args
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filter filters.Args
+}
diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go
new file mode 100644
index 000000000..7d4fcb343
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/configs.go
@@ -0,0 +1,53 @@
+package types
+
+import (
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+)
+
+// configs holds structs used for internal communication between the
+// frontend (such as an http server) and the backend (such as the
+// docker daemon).
+
+// ContainerCreateConfig is the parameter set to ContainerCreate()
+type ContainerCreateConfig struct {
+ Name string
+ Config *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+ AdjustCPUShares bool
+}
+
+// ContainerRmConfig holds arguments for the container remove
+// operation. This struct is used to tell the backend what operations
+// to perform.
+type ContainerRmConfig struct {
+ ForceRemove, RemoveVolume, RemoveLink bool
+}
+
+// ContainerCommitConfig contains build configs for commit operation,
+// and is used when making a commit with the current state of the container.
+type ContainerCommitConfig struct {
+ Pause bool
+ Repo string
+ Tag string
+ Author string
+ Comment string
+ // merge container config into commit config before commit
+ MergeConfigs bool
+ Config *container.Config
+}
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecConfig struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Cmd []string // Execution commands and args
+}
diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go
new file mode 100644
index 000000000..707fc8c17
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/container/config.go
@@ -0,0 +1,62 @@
+package container
+
+import (
+ "time"
+
+ "github.com/docker/engine-api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
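+
+// An illustrative sketch (not part of the upstream docs; the curl command is a
+// hypothetical example): a CMD-SHELL check run every 30s with a 5s timeout,
+// marking the container unhealthy after three consecutive failures.
+//
+//	hc := &HealthConfig{
+//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//		Interval: 30 * time.Second,
+//		Timeout:  5 * time.Second,
+//		Retries:  3,
+//	}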
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that was defined in the image Dockerfile
+ Labels map[string]string // List of labels set on this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go
new file mode 100644
index 000000000..a9ff755b0
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/container/host_config.go
@@ -0,0 +1,320 @@
+package container
+
+import (
+ "strings"
+
+ "github.com/docker/engine-api/types/blkiodev"
+ "github.com/docker/engine-api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack.
+func (n IpcMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's ipc stack.
+func (n IpcMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's ipc stack.
+func (n IpcMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the ipc stack is valid.
+func (n IpcMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose ipc stack is going to be used.
+func (n IpcMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
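+
+// An illustrative sketch (the container name "web" is hypothetical): "" and
+// "host" are valid modes, and "container:<name|id>" targets another container.
+//
+//	m := IpcMode("container:web")
+//	m.Valid()       // true
+//	m.IsContainer() // true
+//	m.Container()   // "web"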
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container cgroup
+func (c CgroupSpec) IsContainer() bool {
+ parts := strings.SplitN(string(c), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+ return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+ parts := strings.SplitN(string(c), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+ Name string
+ MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart when it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has manually stopped it.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same.
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CpusetCpus string // CpusetCpus is the CPUs in which to allow execution (e.g. 0-2, 0,1)
+ CpusetMems string // CpusetMems is the memory nodes (MEMs) in which to allow execution (e.g. 0-2, 0,1)
+ Devices []DeviceMapping // List of devices to map inside the container
+ DiskQuota int64 // Disk limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit int64 // Setting pids limit for a container
+ Ulimits []*units.Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+ VolumesFrom []string // List of volumes to take from other container
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ DNS []string `json:"Dns"` // List of DNS servers to look up
+ DNSOptions []string `json:"DnsOptions"` // List of DNS options to use
+ DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to use
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+ PublishAllPorts bool // Should docker publish all exposed ports for the container
+ ReadonlyRootfs bool // Is the container root filesystem read-only
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ ConsoleSize [2]int // Initial console size
+ Isolation Isolation // Isolation technology of the container (eg default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+}
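+
+// A minimal usage sketch (values are illustrative, not from upstream docs): a
+// host-networked container sharing the host PID namespace, with a read-only
+// root filesystem and a lowered OOM score.
+//
+//	hc := &HostConfig{
+//		NetworkMode:    "host",
+//		PidMode:        "host",
+//		ReadonlyRootfs: true,
+//		OomScoreAdj:    -800,
+//	}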
diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
new file mode 100644
index 000000000..4171059a4
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
@@ -0,0 +1,81 @@
+// +build !windows
+
+package container
+
+import "strings"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// IsPrivate indicates whether the container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsBridge() {
+ return "bridge"
+ } else if n.IsHost() {
+ return "host"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsDefault() {
+ return "default"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+ return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// ConnectedContainer returns the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
+
+// UserDefined returns the name of the user-created network.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
new file mode 100644
index 000000000..0ee332ba6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
@@ -0,0 +1,87 @@
+package container
+
+import (
+ "strings"
+)
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+// Returns false as Windows doesn't support this mode.
+func (n NetworkMode) IsContainer() bool {
+ return false
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+// on Windows it is given the name NAT.
+func (n NetworkMode) IsBridge() bool {
+ return n == "nat"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// Returns false as this is not supported by Windows.
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// ConnectedContainer returns the id of the container whose network this container is connected to.
+// Returns a blank string on Windows.
+func (n NetworkMode) ConnectedContainer() string {
+ return ""
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsDefault() {
+ return "default"
+ } else if n.IsBridge() {
+ return "nat"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+
+ return ""
+}
+
+// UserDefined returns the name of the user-created network.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
diff --git a/vendor/github.com/docker/engine-api/types/errors.go b/vendor/github.com/docker/engine-api/types/errors.go
new file mode 100644
index 000000000..649ab9513
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/errors.go
@@ -0,0 +1,6 @@
+package types
+
+// ErrorResponse is the response body of API errors.
+type ErrorResponse struct {
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go
new file mode 100644
index 000000000..dc2c48b89
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/filters/parse.go
@@ -0,0 +1,307 @@
+// Package filters provides helper functions to parse and handle command line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/engine-api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
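+
+// An illustrative sketch (filter names and values are hypothetical): building
+// Args from two -f style flags, which aggregate under the "label" key as
+// {"label": {"env=prod": true, "tier=db": true}}.
+//
+//	args := NewArgs()
+//	args, _ = ParseFlag("label=env=prod", args)
+//	args, _ = ParseFlag("label=tier=db", args)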
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ buf, err := json.Marshal(a.fields)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ // for daemons older than v1.10, filter must be of the form map[string][]string
+ var buf []byte
+ var err error
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err = json.Marshal(convertArgsToSlice(a.fields))
+ } else {
+ buf, err = json.Marshal(a.fields)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+ if len(p) == 0 {
+ return NewArgs(), nil
+ }
+
+ r := strings.NewReader(p)
+ d := json.NewDecoder(r)
+
+ m := map[string]map[string]bool{}
+ if err := d.Decode(&m); err != nil {
+ r.Seek(0, 0)
+
+ // Allow parsing old arguments in slice format.
+ // Because other libraries might be sending them in this format.
+ deprecated := map[string][]string{}
+ if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+ m = deprecatedArgs(deprecated)
+ } else {
+ return NewArgs(), err
+ }
+ }
+ return Args{m}, nil
+}
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+ values := filters.fields[field]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ filters.fields[name][value] = true
+ } else {
+ filters.fields[name] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ delete(filters.fields[name], value)
+ }
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+ return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name': {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '2'}
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if sources == nil || len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+ // do not filter if there is no filter set or cannot determine filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return fmt.Errorf("Invalid filter '%s'", name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration if it finds an error and it returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := filters.fields[field]; !ok {
+ return nil
+ }
+ for v := range filters.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go
new file mode 100644
index 000000000..47080b652
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/network/network.go
@@ -0,0 +1,53 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+ Driver string
+ Options map[string]string // Per-network IPAM driver options
+ Config []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+ // Configurations
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string
+ // Operational data
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ MacAddress string
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces.
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
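+
+// An illustrative sketch (network name, alias and address are hypothetical):
+// attaching a container to a user-defined network with an alias and a static
+// IPv4 address.
+//
+//	cfg := &NetworkingConfig{
+//		EndpointsConfig: map[string]*EndpointSettings{
+//			"backend": {
+//				Aliases:    []string{"db"},
+//				IPAMConfig: &EndpointIPAMConfig{IPv4Address: "172.20.0.10"},
+//			},
+//		},
+//	}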
diff --git a/vendor/github.com/docker/engine-api/types/plugin.go b/vendor/github.com/docker/engine-api/types/plugin.go
new file mode 100644
index 000000000..05030ff3d
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/plugin.go
@@ -0,0 +1,169 @@
+// +build experimental
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+ Disabled bool
+ AcceptAllPermissions bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+ AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+}
+
+// PluginConfig represents the values of settings potentially modifiable by a user
+type PluginConfig struct {
+ Mounts []PluginMount
+ Env []string
+ Args []string
+ Devices []PluginDevice
+}
+
+// Plugin represents a Docker plugin for the remote API
+type Plugin struct {
+ ID string `json:"Id,omitempty"`
+ Name string
+ Tag string
+ Active bool
+ Config PluginConfig
+ Manifest PluginManifest
+}
+
+// PluginsListResponse contains the response for the remote API
+type PluginsListResponse []*Plugin
+
+const (
+ authzDriver = "AuthzDriver"
+ graphDriver = "GraphDriver"
+ ipamDriver = "IpamDriver"
+ networkDriver = "NetworkDriver"
+ volumeDriver = "VolumeDriver"
+)
+
+// PluginInterfaceType represents a type that a plugin implements.
+type PluginInterfaceType struct {
+ Prefix string // This is always "docker"
+ Capability string // Capability should be validated against the above list.
+ Version string // Plugin API version. Depends on the capability
+}
+
+// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
+func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
+ versionIndex := len(p)
+ prefixIndex := 0
+ if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
+ return fmt.Errorf("%q is not a plugin interface type", p)
+ }
+ p = p[1 : len(p)-1]
+loop:
+ for i, b := range p {
+ switch b {
+ case '.':
+ prefixIndex = i
+ case '/':
+ versionIndex = i
+ break loop
+ }
+ }
+ t.Prefix = string(p[:prefixIndex])
+ t.Capability = string(p[prefixIndex+1 : versionIndex])
+ if versionIndex < len(p) {
+ t.Version = string(p[versionIndex+1:])
+ }
+ return nil
+}
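+
+// For example (illustrative, derived from the parsing above): the JSON string
+// "docker.volumedriver/1.0" unmarshals to a PluginInterfaceType with
+// Prefix "docker", Capability "volumedriver" and Version "1.0".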
+
+// MarshalJSON implements json.Marshaler for PluginInterfaceType
+func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String implements fmt.Stringer for PluginInterfaceType
+func (t PluginInterfaceType) String() string {
+ return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// PluginInterface describes the interface between Docker and plugin
+type PluginInterface struct {
+ Types []PluginInterfaceType
+ Socket string
+}
+
+// PluginSetting is to be embedded in other structs, if they are supposed to be
+// modifiable by the user.
+type PluginSetting struct {
+ Name string
+ Description string
+ Settable []string
+}
+
+// PluginNetwork represents the network configuration for a plugin
+type PluginNetwork struct {
+ Type string
+}
+
+// PluginMount represents the mount configuration for a plugin
+type PluginMount struct {
+ PluginSetting
+ Source *string
+ Destination string
+ Type string
+ Options []string
+}
+
+// PluginEnv represents an environment variable for a plugin
+type PluginEnv struct {
+ PluginSetting
+ Value *string
+}
+
+// PluginArgs represents the command line arguments for a plugin
+type PluginArgs struct {
+ PluginSetting
+ Value []string
+}
+
+// PluginDevice represents a device for a plugin
+type PluginDevice struct {
+ PluginSetting
+ Path *string
+}
+
+// PluginUser represents the user for the plugin's process
+type PluginUser struct {
+ UID uint32 `json:"Uid,omitempty"`
+ GID uint32 `json:"Gid,omitempty"`
+}
+
+// PluginManifest represents the manifest of a plugin
+type PluginManifest struct {
+ ManifestVersion string
+ Description string
+ Documentation string
+ Interface PluginInterface
+ Entrypoint []string
+ Workdir string
+ User PluginUser `json:",omitempty"`
+ Network PluginNetwork
+ Capabilities []string
+ Mounts []PluginMount
+ Devices []PluginDevice
+ Env []PluginEnv
+ Args PluginArgs
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string
+ Description string
+ Value []string
+}
+
+// PluginPrivileges is a list of PluginPrivilege
+type PluginPrivileges []PluginPrivilege
diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go
new file mode 100644
index 000000000..be9cf8ebe
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/reference/image_reference.go
@@ -0,0 +1,34 @@
+package reference
+
+import (
+ distreference "github.com/docker/distribution/reference"
+)
+
+// Parse parses the given references and returns the repository and
+// tag (if present) from it. If there is an error during parsing, it will
+// return an error.
+func Parse(ref string) (string, string, error) {
+ distributionRef, err := distreference.ParseNamed(ref)
+ if err != nil {
+ return "", "", err
+ }
+
+ tag := GetTagFromNamedRef(distributionRef)
+ return distributionRef.Name(), tag, nil
+}
+
+// GetTagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api makes the distinction between repository
+// and tags.
+func GetTagFromNamedRef(ref distreference.Named) string {
+ var tag string
+ switch x := ref.(type) {
+ case distreference.Digested:
+ tag = x.Digest().String()
+ case distreference.NamedTagged:
+ tag = x.Tag()
+ default:
+ tag = "latest"
+ }
+ return tag
+}
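+
+// An illustrative sketch (how far Name() normalizes the repository depends on
+// the vendored distribution package, so treat the exact repo value as an
+// assumption):
+//
+//	repo, tag, err := Parse("redis:3.0") // tag == "3.0"
+//	repo, tag, err = Parse("redis")      // tag == "latest"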
diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go
new file mode 100644
index 000000000..d2aca6f02
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/registry/registry.go
@@ -0,0 +1,99 @@
+package registry
+
+import (
+ "encoding/json"
+ "net"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+ InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
+ IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON
+type NetIPNet net.IPNet
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+// "Index" : {
+// "Name" : "docker.io",
+// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+// "Secure" : true,
+// "Official" : true,
+// },
+// "RemoteName" : "library/debian",
+// "LocalName" : "debian",
+// "CanonicalName" : "docker.io/debian"
+// "Official" : true,
+// }
+//
+// {
+// "Index" : {
+// "Name" : "127.0.0.1:5000",
+// "Mirrors" : [],
+// "Secure" : false,
+// "Official" : false,
+// },
+// "RemoteName" : "user/repo",
+// "LocalName" : "127.0.0.1:5000/user/repo",
+// "CanonicalName" : "127.0.0.1:5000/user/repo",
+// "Official" : false,
+// }
+type IndexInfo struct {
+ // Name is the name of the registry, such as "docker.io"
+ Name string
+ // Mirrors is a list of mirrors, expressed as URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go
new file mode 100644
index 000000000..854f1c453
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/seccomp.go
@@ -0,0 +1,73 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ Architectures []Arch `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Syscall is used to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+}
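+
+// An illustrative sketch (not an official Docker profile): deny every syscall
+// by default and allow only a small whitelist.
+//
+//	profile := Seccomp{
+//		DefaultAction: ActErrno,
+//		Architectures: []Arch{ArchX86_64},
+//		Syscalls: []*Syscall{
+//			{Name: "read", Action: ActAllow},
+//			{Name: "write", Action: ActAllow},
+//			{Name: "exit_group", Action: ActAllow},
+//		},
+//	}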
diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go
new file mode 100644
index 000000000..b420ebe7f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/stats.go
@@ -0,0 +1,115 @@
+// Package types is used for API stability in the types and responses returned to
+// the consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ CPUUsage CPUUsage `json:"cpu_usage"`
+ SystemUsage uint64 `json:"system_cpu_usage"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception
+type MemoryStats struct {
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt"`
+ Limit uint64 `json:"limit"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// TODO Windows: This can be factored out
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write
+// TODO Windows: This can be factored out
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// NetworkStats aggregates all network stats of one container
+// TODO Windows: This will require refactoring
+type NetworkStats struct {
+ RxBytes uint64 `json:"rx_bytes"`
+ RxPackets uint64 `json:"rx_packets"`
+ RxErrors uint64 `json:"rx_errors"`
+ RxDropped uint64 `json:"rx_dropped"`
+ TxBytes uint64 `json:"tx_bytes"`
+ TxPackets uint64 `json:"tx_packets"`
+ TxErrors uint64 `json:"tx_errors"`
+ TxDropped uint64 `json:"tx_dropped"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+ Read time.Time `json:"read"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+}
+
+// StatsJSON embeds Stats and adds per-network statistics.
+type StatsJSON struct {
+ Stats
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
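+
+// An illustrative sketch of both accepted encodings (values are hypothetical):
+//
+//	var s StrSlice
+//	_ = json.Unmarshal([]byte(`"/bin/sh"`), &s)                    // s == StrSlice{"/bin/sh"}
+//	_ = json.Unmarshal([]byte(`["/bin/sh", "-c", "echo hi"]`), &s) // s == StrSlice{"/bin/sh", "-c", "echo hi"}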
diff --git a/vendor/github.com/docker/engine-api/types/swarm/common.go b/vendor/github.com/docker/engine-api/types/swarm/common.go
new file mode 100644
index 000000000..b87f54536
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/common.go
@@ -0,0 +1,21 @@
+package swarm
+
+import "time"
+
+// Version represent the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is the base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/container.go b/vendor/github.com/docker/engine-api/types/swarm/container.go
new file mode 100644
index 000000000..29f2e8a64
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/container.go
@@ -0,0 +1,67 @@
+package swarm
+
+import "time"
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Mounts []Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+}
+
+// MountType represents the type of a mount.
+type MountType string
+
+const (
+ // MountTypeBind BIND
+ MountTypeBind MountType = "bind"
+ // MountTypeVolume VOLUME
+ MountTypeVolume MountType = "volume"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type MountType `json:",omitempty"`
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+}
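+
+// An illustrative sketch (paths and volume name are hypothetical): a read-only
+// bind mount and a named volume mount for a ContainerSpec.
+//
+//	mounts := []Mount{
+//		{Type: MountTypeBind, Source: "/etc/resolv.conf", Target: "/etc/resolv.conf", ReadOnly: true},
+//		{Type: MountTypeVolume, Source: "data", Target: "/var/lib/data"},
+//	}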
+
+// MountPropagation represents the propagation of a mount.
+type MountPropagation string
+
+const (
+ // MountPropagationRPrivate RPRIVATE
+ MountPropagationRPrivate MountPropagation = "rprivate"
+ // MountPropagationPrivate PRIVATE
+ MountPropagationPrivate MountPropagation = "private"
+ // MountPropagationRShared RSHARED
+ MountPropagationRShared MountPropagation = "rshared"
+ // MountPropagationShared SHARED
+ MountPropagationShared MountPropagation = "shared"
+ // MountPropagationRSlave RSLAVE
+ MountPropagationRSlave MountPropagation = "rslave"
+ // MountPropagationSlave SLAVE
+ MountPropagationSlave MountPropagation = "slave"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation MountPropagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/network.go b/vendor/github.com/docker/engine-api/types/swarm/network.go
new file mode 100644
index 000000000..84804da2f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/network.go
@@ -0,0 +1,99 @@
+package swarm
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ TargetPort uint32 `json:",omitempty"`
+ PublishedPort uint32 `json:",omitempty"`
+}
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual IP of an endpoint on a given network.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents ipam options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents ipam configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
+
+// Driver represents a driver (network/volume).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/node.go b/vendor/github.com/docker/engine-api/types/swarm/node.go
new file mode 100644
index 000000000..8421f67a2
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/node.go
@@ -0,0 +1,118 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+
+ Spec NodeSpec `json:",omitempty"`
+ Description NodeDescription `json:",omitempty"`
+ Status NodeStatus `json:",omitempty"`
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Membership NodeMembership `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeMembership represents the membership of a node.
+type NodeMembership string
+
+const (
+ // NodeMembershipPending PENDING
+ NodeMembershipPending NodeMembership = "pending"
+ // NodeMembershipAccepted ACCEPTED
+ NodeMembershipAccepted NodeMembership = "accepted"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/vendor/github.com/docker/engine-api/types/swarm/service.go b/vendor/github.com/docker/engine-api/types/swarm/service.go
new file mode 100644
index 000000000..6303c146f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/service.go
@@ -0,0 +1,44 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ Parallelism uint64 `json:",omitempty"`
+ Delay time.Duration `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/github.com/docker/engine-api/types/swarm/swarm.go
new file mode 100644
index 000000000..0a0685d0f
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/swarm.go
@@ -0,0 +1,129 @@
+package swarm
+
+import "time"
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ID string
+ Meta
+ Spec Spec
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ AcceptancePolicy AcceptancePolicy `json:",omitempty"`
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+
+ // DefaultLogDriver sets the log driver to use at task creation time if
+ // unspecified by a task.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ DefaultLogDriver *Driver `json:",omitempty"`
+}
+
+// AcceptancePolicy represents the list of policies.
+type AcceptancePolicy struct {
+ Policies []Policy `json:",omitempty"`
+}
+
+// Policy represents a role, autoaccept and secret.
+type Policy struct {
+ Role NodeRole
+ Autoaccept bool
+ Secret *string `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ TaskHistoryRetentionLimit int64 `json:",omitempty"`
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ SnapshotInterval uint64 `json:",omitempty"`
+ KeepOldSnapshots uint64 `json:",omitempty"`
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+ HeartbeatTick uint32 `json:",omitempty"`
+ ElectionTick uint32 `json:",omitempty"`
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ HeartbeatPeriod uint64 `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ NodeCertExpiry time.Duration `json:",omitempty"`
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ Protocol ExternalCAProtocol
+ URL string
+ Options map[string]string `json:",omitempty"`
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ ForceNewCluster bool
+ Spec Spec
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ RemoteAddrs []string
+ Secret string // accept by secret
+ CACertHash string
+ Manager bool
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int
+ Managers int
+ CACertHash string
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/task.go b/vendor/github.com/docker/engine-api/types/swarm/task.go
new file mode 100644
index 000000000..fa8228a49
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/swarm/task.go
@@ -0,0 +1,115 @@
+package swarm
+
+import "time"
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ ContainerSpec ContainerSpec `json:",omitempty"`
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the cluster default from swarm.Spec will be
+ // used, finally falling back to the engine default if that is not set either.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resource requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/time/duration_convert.go b/vendor/github.com/docker/engine-api/types/time/duration_convert.go
new file mode 100644
index 000000000..63e1eec19
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+ return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp.go b/vendor/github.com/docker/engine-api/types/time/timestamp.go
new file mode 100644
index 000000000..d3695ba72
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string first as a Go duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If any of
+// these succeed, it returns the Unix timestamp as a string; otherwise it
+// returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
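+//
+// For example (illustrative):
+//   GetTimestamp("90m", ref)                  // Unix seconds of ref minus 90 minutes
+//   GetTimestamp("2006-01-02T15:04:05Z", ref) // "1136214245.000000000"
+//   GetTimestamp("1136073600", ref)           // returned unchanged (already a Unix timestamp)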
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ var parseInLocation bool
+
+ // if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation
+ parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z), then
+ // there will be an extra colon in the input for the tz offset; subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339-like timestamp, otherwise assume a Unix timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//   seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//   if err == nil { since := time.Unix(seconds, nanoseconds) }
+// It returns def (the default seconds) and zero nanoseconds if value == "".
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go
new file mode 100644
index 000000000..3cc8db8c1
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/types.go
@@ -0,0 +1,511 @@
+package types
+
+import (
+ "os"
+ "time"
+
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+ "github.com/docker/engine-api/types/registry"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-connections/nat"
+)
+
+// ContainerCreateResponse contains the information returned to a client on the
+// creation of a new container.
+type ContainerCreateResponse struct {
+ // ID is the ID of the created container.
+ ID string `json:"Id"`
+
+ // Warnings are any warnings encountered during the creation of the container.
+ Warnings []string `json:"Warnings"`
+}
+
+// ContainerExecCreateResponse contains response of Remote API:
+// POST "/containers/{name:.*}/exec"
+type ContainerExecCreateResponse struct {
+ // ID is the exec ID.
+ ID string `json:"Id"`
+}
+
+// ContainerUpdateResponse contains response of Remote API:
+// POST "/containers/{name:.*}/update"
+type ContainerUpdateResponse struct {
+ // Warnings are any warnings encountered during the updating of the container.
+ Warnings []string `json:"Warnings"`
+}
+
+// AuthResponse contains response of Remote API:
+// POST "/auth"
+type AuthResponse struct {
+ // Status is the authentication status
+ Status string `json:"Status"`
+
+ // IdentityToken is an opaque token used for authenticating
+ // a user after a successful login.
+ IdentityToken string `json:"IdentityToken,omitempty"`
+}
+
+// ContainerWaitResponse contains response of Remote API:
+// POST "/containers/"+containerID+"/wait"
+type ContainerWaitResponse struct {
+ // StatusCode is the status code of the wait job
+ StatusCode int `json:"StatusCode"`
+}
+
+// ContainerCommitResponse contains response of Remote API:
+// POST "/commit?container="+containerID
+type ContainerCommitResponse struct {
+ ID string `json:"Id"`
+}
+
+// ContainerChange contains response of Remote API:
+// GET "/containers/{name:.*}/changes"
+type ContainerChange struct {
+ Kind int
+ Path string
+}
+
+// ImageHistory contains response of Remote API:
+// GET "/images/{name:.*}/history"
+type ImageHistory struct {
+ ID string `json:"Id"`
+ Created int64
+ CreatedBy string
+ Tags []string
+ Size int64
+ Comment string
+}
+
+// ImageDelete contains response of Remote API:
+// DELETE "/images/{name:.*}"
+type ImageDelete struct {
+ Untagged string `json:",omitempty"`
+ Deleted string `json:",omitempty"`
+}
+
+// Image contains response of Remote API:
+// GET "/images/json"
+type Image struct {
+ ID string `json:"Id"`
+ ParentID string `json:"ParentId"`
+ RepoTags []string
+ RepoDigests []string
+ Created int64
+ Size int64
+ VirtualSize int64
+ Labels map[string]string
+}
+
+// GraphDriverData returns Image's graph driver config info
+// when calling inspect command
+type GraphDriverData struct {
+ Name string
+ Data map[string]string
+}
+
+// RootFS returns Image's RootFS description including the layer IDs.
+type RootFS struct {
+ Type string
+ Layers []string `json:",omitempty"`
+ BaseLayer string `json:",omitempty"`
+}
+
+// ImageInspect contains response of Remote API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+ ID string `json:"Id"`
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Comment string
+ Created string
+ Container string
+ ContainerConfig *container.Config
+ DockerVersion string
+ Author string
+ Config *container.Config
+ Architecture string
+ Os string
+ Size int64
+ VirtualSize int64
+ GraphDriver GraphDriverData
+ RootFS RootFS
+}
+
+// Port stores open ports info of container
+// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"}
+type Port struct {
+ IP string `json:",omitempty"`
+ PrivatePort int
+ PublicPort int `json:",omitempty"`
+ Type string
+}
+
+// Container contains response of Remote API:
+// GET "/containers/json"
+type Container struct {
+ ID string `json:"Id"`
+ Names []string
+ Image string
+ ImageID string
+ Command string
+ Created int64
+ Ports []Port
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Labels map[string]string
+ State string
+ Status string
+ HostConfig struct {
+ NetworkMode string `json:",omitempty"`
+ }
+ NetworkSettings *SummaryNetworkSettings
+ Mounts []MountPoint
+}
+
+// CopyConfig contains request body of Remote API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+ Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type ContainerPathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// ContainerProcessList contains response of Remote API:
+// GET "/containers/{name:.*}/top"
+type ContainerProcessList struct {
+ Processes [][]string
+ Titles []string
+}
+
+// Version contains response of Remote API:
+// GET "/version"
+type Version struct {
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// Info contains response of Remote API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ ExecutionDriver string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+ SecurityOptions []string
+ Runtimes map[string]Runtime
+ DefaultRuntime string
+ Swarm swarm.Info
+}
+
+// PluginsInfo is a temp struct holding the names of the plugins
+// registered with the docker daemon. It is used by the Info struct.
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+}
+
+// ExecStartCheck is a temp struct used by execStart.
+// Config fields are part of ExecConfig in the runconfig package.
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=starting, else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// ContainerState stores the container's running state.
+// It's part of ContainerJSONBase and is returned by the "inspect" command.
+type ContainerState struct {
+ Status string
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only available in Docker Swarm
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Remote API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"`
+ Name string
+ RestartCount int
+ Driver string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is a newly used struct along with MountPoint
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of a container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+ SandboxKey string // SandboxKey identifies the sandbox
+ SecondaryIPAddresses []network.Address
+ SecondaryIPv6Addresses []network.Address
+}
+
+// DefaultNetworkSettings holds network information
+// during the two-release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+type MountPoint struct {
+ Name string `json:",omitempty"`
+ Source string
+ Destination string
+ Driver string `json:",omitempty"`
+ Mode string
+ RW bool
+ Propagation string
+}
+
+// Volume represents the configuration of a volume for the remote API
+type Volume struct {
+ Name string // Name is the name of the volume
+ Driver string // Driver is the Driver name used to create the volume
+ Mountpoint string // Mountpoint is the location on disk of the volume
+ Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume
+ Labels map[string]string // Labels is metadata specific to the volume
+ Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level)
+}
+
+// VolumesListResponse contains the response for the remote API:
+// GET "/volumes"
+type VolumesListResponse struct {
+ Volumes []*Volume // Volumes is the list of volumes being returned
+ Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers
+}
+
+// VolumeCreateRequest contains the request for the remote API:
+// POST "/volumes/create"
+type VolumeCreateRequest struct {
+ Name string // Name is the requested name of the volume
+ Driver string // Driver is the name of the driver that should be used to create the volume
+ DriverOpts map[string]string // DriverOpts holds the driver-specific options to use when creating the volume.
+ Labels map[string]string // Labels holds metadata specific to the volume being created.
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents if the network is used internal only
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network-specific options to use when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ CheckDuplicate bool
+ Driver string
+ EnableIPv6 bool
+ IPAM network.IPAM
+ Internal bool
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for network create call.
+type NetworkCreateRequest struct {
+ NetworkCreate
+ Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for network create call
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+ Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+ Container string
+ EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+ Container string
+ Force bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+ Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ Path string `json:"path"`
+ Args []string `json:"runtimeArgs,omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md
new file mode 100644
index 000000000..cdac50a53
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/README.md
@@ -0,0 +1,14 @@
+## Legacy API type versions
+
+This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+### Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go
new file mode 100644
index 000000000..611d4fed6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+ "strconv"
+ "strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
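+// For example (illustrative): compare("1.22", "1.9") returns 1, since the
+// dot-separated components are compared numerically rather than lexically.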
+func compare(v1, v2 string) int {
+ var (
+ currTab = strings.Split(v1, ".")
+ otherTab = strings.Split(v2, ".")
+ )
+
+ max := len(currTab)
+ if len(otherTab) > max {
+ max = len(otherTab)
+ }
+ for i := 0; i < max; i++ {
+ var currInt, otherInt int
+
+ if len(currTab) > i {
+ currInt, _ = strconv.Atoi(currTab[i])
+ }
+ if len(otherTab) > i {
+ otherInt, _ = strconv.Atoi(otherTab[i])
+ }
+ if currInt > otherInt {
+ return 1
+ }
+ if otherInt > currInt {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+ return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+ return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+ return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+ return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+ return compare(v, other) == 0
+}
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md
new file mode 100644
index 000000000..d257e44fd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/README.md
@@ -0,0 +1,13 @@
+[](https://godoc.org/github.com/docker/go-connections)
+
+# Introduction
+
+go-connections provides common packages for working with network connections.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation.
+
+## License
+
+go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
new file mode 100644
index 000000000..4d5f5ae63
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -0,0 +1,242 @@
+// Package nat is a convenience package for manipulation of strings describing network ports.
+package nat
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+const (
+ // portSpecTemplate is the expected format for port specifications
+ portSpecTemplate = "ip:hostPort:containerPort"
+)
+
+// PortBinding represents a binding between a Host IP address and a Host Port
+type PortBinding struct {
+ // HostIP is the host IP Address
+ HostIP string `json:"HostIp"`
+ // HostPort is the host port number
+ HostPort string
+}
+
+// PortMap is a collection of PortBinding indexed by Port
+type PortMap map[Port][]PortBinding
+
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// NewPort creates a new instance of a Port given a protocol and port number or port range
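+// For example (illustrative): NewPort("tcp", "80") yields Port("80/tcp"), and
+// NewPort("udp", "8000-9000") yields Port("8000-9000/udp").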
+func NewPort(proto, port string) (Port, error) {
+ // Check for parsing issues on "port" now so we can avoid having
+ // to check it later on.
+
+ portStartInt, portEndInt, err := ParsePortRangeToInt(port)
+ if err != nil {
+ return "", err
+ }
+
+ if portStartInt == portEndInt {
+ return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
+ }
+ return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
+}
+
+// ParsePort parses the port number string and returns an int
+func ParsePort(rawPort string) (int, error) {
+ if len(rawPort) == 0 {
+ return 0, nil
+ }
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// ParsePortRangeToInt parses the port range string and returns start/end ints
+func ParsePortRangeToInt(rawPort string) (int, int, error) {
+ if len(rawPort) == 0 {
+ return 0, 0, nil
+ }
+ start, end, err := ParsePortRange(rawPort)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(start), int(end), nil
+}
+
+// Proto returns the protocol of a Port
+func (p Port) Proto() string {
+ proto, _ := SplitProtoPort(string(p))
+ return proto
+}
+
+// Port returns the port number of a Port
+func (p Port) Port() string {
+ _, port := SplitProtoPort(string(p))
+ return port
+}
+
+// Int returns the port number of a Port as an int
+func (p Port) Int() int {
+ portStr := p.Port()
+ // We don't need to check for an error because we're going to
+ // assume that any error would have been found, and reported, in NewPort()
+ port, _ := ParsePort(portStr)
+ return port
+}
+
+// Range returns the start/end port numbers of a Port range as ints
+func (p Port) Range() (int, int, error) {
+ return ParsePortRangeToInt(p.Port())
+}
+
+// SplitProtoPort splits a port in the format of proto/port
+func SplitProtoPort(rawPort string) (string, string) {
+ parts := strings.Split(rawPort, "/")
+ l := len(parts)
+ if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
+ return "", ""
+ }
+ if l == 1 {
+ return "tcp", rawPort
+ }
+ if len(parts[1]) == 0 {
+ return "tcp", parts[0]
+ }
+ return parts[1], parts[0]
+}
+
+func validateProto(proto string) bool {
+ for _, availableProto := range []string{"tcp", "udp"} {
+ if availableProto == proto {
+ return true
+ }
+ }
+ return false
+}
+
+// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
+// these into the internal types.
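+// For example (illustrative), "127.0.0.1:8080:80/tcp" exposes "80/tcp" and
+// binds it to 127.0.0.1:8080 on the host.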
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+ var (
+ exposedPorts = make(map[Port]struct{}, len(ports))
+ bindings = make(map[Port][]PortBinding)
+ )
+ for _, rawPort := range ports {
+ portMappings, err := ParsePortSpec(rawPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
+ }
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+func splitParts(rawport string) (string, string, string) {
+ parts := strings.Split(rawport, ":")
+ n := len(parts)
+ containerport := parts[n-1]
+
+ switch n {
+ case 1:
+ return "", "", containerport
+ case 2:
+ return "", parts[0], containerport
+ case 3:
+ return parts[0], parts[1], containerport
+ default:
+ return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
+ }
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ var proto string
+ rawIP, hostPort, containerPort := splitParts(rawPort)
+ proto, containerPort = SplitProtoPort(containerPort)
+
+ // Strip [] from IPV6 addresses
+ ip, _, err := net.SplitHostPort(rawIP + ":")
+ if err != nil {
+ return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
+ }
+ if ip != "" && net.ParseIP(ip) == nil {
+ return nil, fmt.Errorf("Invalid ip address: %s", ip)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("No port specified: %s", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+ }
+ }
+
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
+ }
+ }
+
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("Invalid proto: %s", proto)
+ }
+
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
+ }
+
+ binding := PortBinding{
+ HostIP: ip,
+ HostPort: hostPort,
+ }
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
+ }
+ return ports, nil
+}
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
new file mode 100644
index 000000000..892adf8c6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/parse.go
@@ -0,0 +1,57 @@
+package nat
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
+// DEPRECATED: do not use, this function may be removed in a future version
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("Empty string specified for ports.")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+ }
+ return start, end, nil
+}
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 000000000..ce950171e
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+ "sort"
+ "strings"
+)
+
+type portSorter struct {
+ ports []Port
+ by func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+ return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+ s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+ ip := s.ports[i]
+ jp := s.ports[j]
+
+ return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate.
+// The predicate should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`.
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+ s := &portSorter{ports, predicate}
+ sort.Sort(s)
+}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the port so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort are placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ bindings[p] = []PortBinding{}
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ if _, ok := bindings[entry.port]; ok {
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+ }
+}
+
+func toInt(s string) uint64 {
+ i, _, err := ParsePortRange(s)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
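
Sort takes a caller-supplied predicate, while SortPortMap reorders both the ports and their bindings so that entries with an explicit HostPort come first. A small sketch of the predicate-based variant (assumed values):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	p80, _ := nat.NewPort("tcp", "80")
	p53, _ := nat.NewPort("udp", "53")
	p443, _ := nat.NewPort("tcp", "443")

	ports := []nat.Port{p443, p80, p53}
	// Order by numeric port value, smallest first.
	nat.Sort(ports, func(i, j nat.Port) bool { return i.Int() < j.Int() })
	fmt.Println(ports) // [53/udp 80/tcp 443/tcp]
}
```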
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
new file mode 100644
index 000000000..99846ffdd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -0,0 +1,81 @@
+package sockets
+
+import (
+ "errors"
+ "net"
+ "sync"
+)
+
+var errClosed = errors.New("use of closed network connection")
+
+// InmemSocket implements net.Listener using in-memory only connections.
+type InmemSocket struct {
+ chConn chan net.Conn
+ chClose chan struct{}
+ addr string
+ mu sync.Mutex
+}
+
+// dummyAddr is used to satisfy net.Addr for the in-mem socket
+// it is just stored as a string and returns the string for all calls
+type dummyAddr string
+
+// NewInmemSocket creates an in-memory only net.Listener
+// The addr argument can be any string, but is used to satisfy the `Addr()` part
+// of the net.Listener interface
+func NewInmemSocket(addr string, bufSize int) *InmemSocket {
+ return &InmemSocket{
+ chConn: make(chan net.Conn, bufSize),
+ chClose: make(chan struct{}),
+ addr: addr,
+ }
+}
+
+// Addr returns the socket's addr string to satisfy net.Listener
+func (s *InmemSocket) Addr() net.Addr {
+ return dummyAddr(s.addr)
+}
+
+// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
+func (s *InmemSocket) Accept() (net.Conn, error) {
+ select {
+ case conn := <-s.chConn:
+ return conn, nil
+ case <-s.chClose:
+ return nil, errClosed
+ }
+}
+
+// Close closes the listener. It will be unavailable for use once closed.
+func (s *InmemSocket) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ select {
+ case <-s.chClose:
+ default:
+ close(s.chClose)
+ }
+ return nil
+}
+
+// Dial is used to establish a connection with the in-mem server
+func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
+ srvConn, clientConn := net.Pipe()
+ select {
+ case s.chConn <- srvConn:
+ case <-s.chClose:
+ return nil, errClosed
+ }
+
+ return clientConn, nil
+}
+
+// Network returns the addr string, satisfies net.Addr
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+// String returns the string form
+func (a dummyAddr) String() string {
+ return string(a)
+}
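
InmemSocket wires Accept and Dial together over net.Pipe, which is handy for tests that need a net.Listener without touching the network. A hedged usage sketch (addresses are arbitrary strings):

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("inmem://test", 1)
	defer l.Close()

	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write([]byte("hello"))
	}()

	// Dial's arguments are ignored; they exist for net.Dialer-style signatures.
	conn, err := l.Dial("inmem", "inmem://test")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 5)
	if _, err := io.ReadFull(conn, buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // hello
}
```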
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
new file mode 100644
index 000000000..98e9a1dc6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/proxy.go
@@ -0,0 +1,51 @@
+package sockets
+
+import (
+ "net"
+ "net/url"
+ "os"
+ "strings"
+
+ "golang.org/x/net/proxy"
+)
+
+// GetProxyEnv allows access to the uppercase and the lowercase forms of
+// proxy-related variables. See the Go specification for details on these
+// variables. https://golang.org/pkg/net/http/
+func GetProxyEnv(key string) string {
+ proxyValue := os.Getenv(strings.ToUpper(key))
+ if proxyValue == "" {
+ return os.Getenv(strings.ToLower(key))
+ }
+ return proxyValue
+}
+
+// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
+// proxy.Dialer which will route the connections through the proxy using the
+// given dialer.
+func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
+ allProxy := GetProxyEnv("all_proxy")
+ if len(allProxy) == 0 {
+ return direct, nil
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return direct, err
+ }
+
+ proxyFromURL, err := proxy.FromURL(proxyURL, direct)
+ if err != nil {
+ return direct, err
+ }
+
+ noProxy := GetProxyEnv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxyFromURL, nil
+ }
+
+ perHost := proxy.NewPerHost(proxyFromURL, direct)
+ perHost.AddFromString(noProxy)
+
+ return perHost, nil
+}
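
DialerFromEnvironment falls back to the direct dialer when all_proxy is unset, so it is safe to call unconditionally. A sketch (the target address is made up):

```go
package main

import (
	"log"
	"net"
	"time"

	"github.com/docker/go-connections/sockets"
)

func main() {
	direct := &net.Dialer{Timeout: 10 * time.Second}

	// Honors all_proxy/ALL_PROXY and no_proxy/NO_PROXY when set,
	// otherwise returns the direct dialer unchanged.
	dialer, err := sockets.DialerFromEnvironment(direct)
	if err != nil {
		log.Fatal(err)
	}

	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```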
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
new file mode 100644
index 000000000..a1d7beb4d
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -0,0 +1,38 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "time"
+)
+
+// Why 32? See https://github.com/docker/docker/pull/8035.
+const defaultTimeout = 32 * time.Second
+
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
+// ConfigureTransport configures the specified Transport according to the
+// specified proto and addr.
+// If the proto is unix (using a unix socket to communicate) or npipe the
+// compression is disabled.
+func ConfigureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ dialer, err := DialerFromEnvironment(&net.Dialer{
+ Timeout: defaultTimeout,
+ })
+ if err != nil {
+ return err
+ }
+ tr.Dial = dialer.Dial
+ }
+ return nil
+}
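
ConfigureTransport is the usual entry point: it selects the unix, npipe, or TCP dialer for an http.Transport based on proto. A sketch against a hypothetical Unix socket path:

```go
package main

import (
	"log"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix", compression is disabled and every request dials the socket path.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/example.sock"); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // requests made with this client now go over the socket
}
```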
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 000000000..386cf0dbb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package sockets
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("Unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, defaultTimeout)
+ }
+ return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 000000000..5c21644e1
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,27 @@
+package sockets
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return DialPipe(addr, defaultTimeout)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 000000000..53cbb6c79
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified tls configuration. If TLSConfig is set, will encapsulate the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil {
+ tlsConfig.NextProtos = []string{"http/1.1"}
+ l = tls.NewListener(l, tlsConfig)
+ }
+ return l, nil
+}
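
NewTCPSocket only wraps the listener in TLS when a non-nil *tls.Config is supplied. A minimal sketch (the address is illustrative):

```go
package main

import (
	"log"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// nil tlsConfig gives a plain TCP listener; pass a *tls.Config
	// (e.g. from tlsconfig.Server) to serve TLS instead.
	l, err := sockets.NewTCPSocket("127.0.0.1:2376", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("listening on", l.Addr())
}
```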
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 000000000..a8b5dbb6f
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package sockets
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+ if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ mask := syscall.Umask(0777)
+ defer syscall.Umask(mask)
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Chown(path, 0, gid); err != nil {
+ l.Close()
+ return nil, err
+ }
+ if err := os.Chmod(path, 0660); err != nil {
+ l.Close()
+ return nil, err
+ }
+ return l, nil
+}
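
NewUnixSocket unlinks any stale socket, listens, then chowns the path to root:gid and chmods it to 0660, so it generally needs to run as root. A sketch with an assumed path and the caller's own group:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Typically requires root: the socket is chowned to uid 0.
	l, err := sockets.NewUnixSocket("/tmp/example.sock", os.Getgid())
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("listening on", l.Addr())
}
```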
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 000000000..1d5fa4c76
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,21 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+ "runtime"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// SystemCertPool returns a copy of the system cert pool. On Windows, a failure
+// to load the pool is logged and an empty pool is returned instead of an error.
+func SystemCertPool() (*x509.CertPool, error) {
+ certpool, err := x509.SystemCertPool()
+ if err != nil && runtime.GOOS == "windows" {
+ logrus.Infof("Unable to use system certificate pool: %v", err)
+ return x509.NewCertPool(), nil
+ }
+ return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 000000000..262c95e8c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,16 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool requires go 1.7 or later.
+func SystemCertPool() (*x509.CertPool, error) {
+ logrus.Warn("Unable to use system certificate pool: requires building with go 1.7 or later")
+ return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 000000000..ad4b112ab
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,246 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+ CAFile string
+
+ // If either CertFile or KeyFile is empty, Client() will not load them
+ // preventing the client from authenticating to the server.
+ // However, Server() requires them and will error out if they are empty.
+ CertFile string
+ KeyFile string
+
+ // client-only option
+ InsecureSkipVerify bool
+ // server-only option
+ ClientAuth tls.ClientAuthType
+ // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+ // creds will include exclusively the roots in that CA file. If no CA file is provided,
+ // the system pool will be used.
+ ExclusiveRootPools bool
+ MinVersion uint16
+ // If Passphrase is set, it will be used to decrypt a TLS private key
+ // if the key is encrypted
+ Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+ tls.VersionSSL30: {},
+ tls.VersionTLS10: {},
+ tls.VersionTLS11: {},
+ tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
+func ServerDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+ }
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
+func ClientDefault() *tls.Config {
+ return &tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+ }
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ var (
+ certPool *x509.CertPool
+ err error
+ )
+ if exclusivePool {
+ certPool = x509.NewCertPool()
+ } else {
+ certPool, err = SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read system certificates: %v", err)
+ }
+ }
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ logrus.Debugf("Trusting %d certs", len(certPool.Subjects()))
+ return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+ _, ok := allTLSVersions[version]
+ return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+ if options.MinVersion > 0 {
+ if !isValidMinVersion(options.MinVersion) {
+ return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+ }
+ if options.MinVersion < config.MinVersion {
+ return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+ }
+ config.MinVersion = options.MinVersion
+ }
+
+ return nil
+}
+
+// IsErrEncryptedKey returns true if 'err' is an incorrect-password error
+// raised while trying to decrypt a TLS private key.
+func IsErrEncryptedKey(err error) bool {
+ return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes' in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+ // this section makes some small changes to code from notary/tuf/utils/x509.go
+ pemBlock, _ := pem.Decode(keyBytes)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("no valid private key found")
+ }
+
+ var err error
+ if x509.IsEncryptedPEMBlock(pemBlock) {
+ keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+ }
+ keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+ }
+
+ return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options',
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+ if options.CertFile == "" && options.KeyFile == "" {
+ return nil, nil
+ }
+
+ errMessage := "Could not load X509 key pair"
+
+ cert, err := ioutil.ReadFile(options.CertFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault()
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ tlsCerts, err := getCert(options)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = tlsCerts
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault()
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
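
Client and Server both start from the hardened defaults above and then layer on the supplied Options. A sketch with hypothetical certificate paths:

```go
package main

import (
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Paths are placeholders; point them at your own CA, cert and key.
	opts := tlsconfig.Options{
		CAFile:   "/etc/pki/ca.pem",
		CertFile: "/etc/pki/cert.pem",
		KeyFile:  "/etc/pki/key.pem",
	}

	clientCfg, err := tlsconfig.Client(opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("client min TLS version: %x", clientCfg.MinVersion)

	serverCfg, err := tlsconfig.Server(opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("server accepts %d cipher suites", len(serverCfg.CipherSuites))
}
```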
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
new file mode 100644
index 000000000..6b4c6a7c0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
@@ -0,0 +1,17 @@
+// +build go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
new file mode 100644
index 000000000..ee22df47c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
@@ -0,0 +1,15 @@
+// +build !go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 000000000..e2fb4051f
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,13 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## License
+
+go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 000000000..c219a8a96
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
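
A few examples of what HumanDuration produces, derived from the branches above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(47 * time.Second))    // 47 seconds
	fmt.Println(units.HumanDuration(3 * time.Hour))       // 3 hours
	fmt.Println(units.HumanDuration(35 * 24 * time.Hour)) // 5 weeks
}
```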
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 000000000..3b59daff3
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
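
HumanSize formats with decimal (base-1000) units while BytesSize uses binary (base-1024) units; RAMInBytes goes the other way and always parses with binary multipliers. For example:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanSize(2 * 1024 * 1024)) // 2.097 MB (decimal)
	fmt.Println(units.BytesSize(2 * 1024 * 1024)) // 2 MiB (binary)

	n, err := units.RAMInBytes("64m")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 67108864 (64 * 1024 * 1024)
}
```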
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 000000000..f0a7be292
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,109 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // magic numbers for making the syscall
+ // some of these are defined in the syscall package, but not all.
+ // Also since Windows client doesn't get access to the syscall package, need to
+ // define these here
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ limitVals := strings.SplitN(parts[1], ":", 2)
+ if len(limitVals) > 2 {
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ soft, err := strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ hard := soft // in case no hard was set
+ if len(limitVals) == 2 {
+ hard, err = strconv.ParseInt(limitVals[1], 10, 64)
+ }
+ if soft > hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
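
ParseUlimit expects name=soft[:hard]; GetRlimit then maps the name to its numeric resource type. A short sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/go-units"
)

func main() {
	ul, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ul) // nofile=1024:2048

	rl, err := ul.GetRlimit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rl.Type, rl.Soft, rl.Hard) // 7 1024 2048
}
```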
diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE
new file mode 100644
index 000000000..bdc403653
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 The Linux Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runtime-spec/README.md b/vendor/github.com/opencontainers/runtime-spec/README.md
new file mode 100644
index 000000000..317248dc8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/README.md
@@ -0,0 +1,163 @@
+# Open Container Initiative Runtime Specification
+
+The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers.
+
+The specification can be found [here](spec.md).
+
+## Table of Contents
+
+Additional documentation about how this group operates:
+
+- [Code of Conduct][code-of-conduct]
+- [Style and Conventions](style.md)
+- [Roadmap](ROADMAP.md)
+- [Implementations](implementations.md)
+- [Releases](RELEASES.md)
+- [project](project.md)
+- [charter][charter]
+
+## Use Cases
+
+To provide context for users the following section gives example use cases for each part of the spec.
+
+### Application Bundle Builders
+
+Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container.
+The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups).
+Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments.
+
+### Hook Developers
+
+[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application.
+Example use cases include sophisticated network configuration, volume garbage collection, etc.
+
+### Runtime Developers
+
+Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host specific details, on a particular platform.
+
+## Releases
+
+There is a loose [Road Map](./ROADMAP.md).
+During the `0.x` series of OCI releases we make no backwards compatibility guarantees and intend to break the schema during this series.
+
+## Contributing
+
+Development happens on GitHub for the spec.
+Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
+
+The specification and code is licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file.
+
+### Discuss your design
+
+The project welcomes submissions, but please let everyone know what you are working on.
+
+Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
+This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
+
+Typos and grammatical errors can go straight to a pull-request.
+When in doubt, start on the [mailing-list](#mailing-list).
+
+### Weekly Call
+
+The contributors and maintainers of all OCI projects have a weekly meeting on Wednesdays at:
+
+* 8:00 AM (USA Pacific), during [odd weeks][iso-week].
+* 5:00 PM (USA Pacific), during [even weeks][iso-week].
+
+There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics).
+
+Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: 415-968-0849 (no PIN needed.)
+An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
+Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived to the [wiki][runtime-wiki].
+
+### Mailing List
+
+You can subscribe and join the mailing list on [Google Groups][dev-list].
+
+### IRC
+
+OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
+
+### Git commit
+
+#### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
+The rules are pretty simple: if you can certify the below (from http://developercertificate.org):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
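+
+For example, committing with the `-s` flag adds the trailer automatically (the commit message, name, and email below are made up for illustration):
+
+```
+$ git commit -s -m "README: fix typo in build instructions"
+$ git log -1 --format=%B
+README: fix typo in build instructions
+
+Signed-off-by: Joe Smith <joe.smith@example.com>
+```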
+
+#### Commit Style
+
+Simple house-keeping for clean git history.
+Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1].
+
+1. Separate the subject from body with a blank line
+2. Limit the subject line to 50 characters
+3. Capitalize the subject line
+4. Do not end the subject line with a period
+5. Use the imperative mood in the subject line
+6. Wrap the body at 72 characters
+7. Use the body to explain what and why vs. how
+ * If there was important/useful/essential conversation or information, copy or include a reference
+8. When possible, use one keyword to scope the change in the subject (e.g. "README: ...", "runtime: ...")
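+
+For example, a subject and body that follow these rules might look like (hypothetical change, shown only for illustration):
+
+```
+runtime: clarify console size handling
+
+Explain that consoleSize is ignored when terminal is false, and
+link to the mailing list thread where this behaviour was agreed.
+```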
+
+
+[charter]: https://www.opencontainers.org/about/governance
+[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md
+[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
+[how-to-git-commit]: http://chris.beams.io/posts/git-commit
+[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
+[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date
+[oci]: https://www.opencontainers.org
+[rfc5545]: https://tools.ietf.org/html/rfc5545
+[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki
+[uberconference]: https://www.uberconference.com/opencontainers
+
+[git-commit.1]: http://git-scm.com/docs/git-commit
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
new file mode 100644
index 000000000..3c744ed86
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -0,0 +1,563 @@
+package specs
+
+import "os"
+
+// Spec is the base configuration for the container.
+type Spec struct {
+ // Version of the Open Container Runtime Specification with which the bundle complies.
+ Version string `json:"ociVersion"`
+ // Platform specifies the configuration's target platform.
+ Platform Platform `json:"platform"`
+ // Process configures the container process.
+ Process Process `json:"process"`
+ // Root configures the container's root filesystem.
+ Root Root `json:"root"`
+ // Hostname configures the container's hostname.
+ Hostname string `json:"hostname,omitempty"`
+ // Mounts configures additional mounts (on top of Root).
+ Mounts []Mount `json:"mounts,omitempty"`
+ // Hooks configures callbacks for container lifecycle events.
+ Hooks *Hooks `json:"hooks,omitempty"`
+ // Annotations contains arbitrary metadata for the container.
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Linux is platform specific configuration for Linux based containers.
+ Linux *Linux `json:"linux,omitempty" platform:"linux"`
+ // Solaris is platform specific configuration for Solaris containers.
+ Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"`
+ // Windows is platform specific configuration for Windows based containers, including Hyper-V containers.
+ Windows *Windows `json:"windows,omitempty" platform:"windows"`
+}
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+ // Terminal creates an interactive terminal for the container.
+ Terminal bool `json:"terminal,omitempty"`
+ // ConsoleSize specifies the size of the console.
+ ConsoleSize Box `json:"consoleSize,omitempty"`
+ // User specifies user information for the process.
+ User User `json:"user"`
+ // Args specifies the binary and arguments for the application to execute.
+ Args []string `json:"args"`
+ // Env populates the process environment for the process.
+ Env []string `json:"env,omitempty"`
+ // Cwd is the current working directory for the process and must be
+ // relative to the container's root.
+ Cwd string `json:"cwd"`
+ // Capabilities are Linux capabilities that are kept for the process.
+ Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"`
+ // Rlimits specifies rlimit options to apply to the process.
+ Rlimits []LinuxRlimit `json:"rlimits,omitempty" platform:"linux"`
+ // NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
+ NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"`
+ // ApparmorProfile specifies the apparmor profile for the container.
+ ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
+ // SelinuxLabel specifies the selinux context that the container process is run as.
+ SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
+}
+
+// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process.
+// http://man7.org/linux/man-pages/man7/capabilities.7.html
+type LinuxCapabilities struct {
+ // Bounding is the set of capabilities checked by the kernel.
+ Bounding []string `json:"bounding,omitempty" platform:"linux"`
+ // Effective is the set of capabilities checked by the kernel.
+ Effective []string `json:"effective,omitempty" platform:"linux"`
+ // Inheritable is the capabilities preserved across execve.
+ Inheritable []string `json:"inheritable,omitempty" platform:"linux"`
+ // Permitted is the limiting superset for effective capabilities.
+ Permitted []string `json:"permitted,omitempty" platform:"linux"`
+ // Ambient is the ambient set of capabilities that are kept.
+ Ambient []string `json:"ambient,omitempty" platform:"linux"`
+}
+
+// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
+type Box struct {
+ // Height is the vertical dimension of a box.
+ Height uint `json:"height"`
+ // Width is the horizontal dimension of a box.
+ Width uint `json:"width"`
+}
+
+// User specifies specific user (and group) information for the container process.
+type User struct {
+ // UID is the user id.
+ UID uint32 `json:"uid" platform:"linux,solaris"`
+ // GID is the group id.
+ GID uint32 `json:"gid" platform:"linux,solaris"`
+ // AdditionalGids are additional group ids set for the container's process.
+ AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
+ // Username is the user name.
+ Username string `json:"username,omitempty" platform:"windows"`
+}
+
+// Root contains information about the container's root filesystem on the host.
+type Root struct {
+ // Path is the absolute path to the container's root filesystem.
+ Path string `json:"path"`
+ // Readonly makes the root filesystem for the container readonly before the process is executed.
+ Readonly bool `json:"readonly,omitempty"`
+}
+
+// Platform specifies OS and arch information for the host system that the container
+// is created for.
+type Platform struct {
+ // OS is the operating system.
+ OS string `json:"os"`
+ // Arch is the architecture
+ Arch string `json:"arch"`
+}
+
+// Mount specifies a mount for a container.
+type Mount struct {
+ // Destination is the path where the mount will be placed relative to the container's root. The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point.
+ Destination string `json:"destination"`
+ // Type specifies the mount kind.
+ Type string `json:"type,omitempty"`
+ // Source specifies the source path of the mount. In the case of bind mounts on
+ // Linux based systems this would be the file on the host.
+ Source string `json:"source,omitempty"`
+ // Options are fstab style mount options.
+ Options []string `json:"options,omitempty"`
+}
+
+// Hook specifies a command that is run at a particular event in the lifecycle of a container
+type Hook struct {
+ Path string `json:"path"`
+ Args []string `json:"args,omitempty"`
+ Env []string `json:"env,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+}
+
+// Hooks for container setup and teardown
+type Hooks struct {
+ // Prestart is a list of hooks to be run before the container process is executed.
+ // On Linux, they are run after the container namespaces are created.
+ Prestart []Hook `json:"prestart,omitempty"`
+ // Poststart is a list of hooks to be run after the container process is started.
+ Poststart []Hook `json:"poststart,omitempty"`
+ // Poststop is a list of hooks to be run after the container process exits.
+ Poststop []Hook `json:"poststop,omitempty"`
+}
+
+// Linux contains platform specific configuration for Linux based containers.
+type Linux struct {
+ // UIDMapping specifies user mappings for supporting user namespaces on Linux.
+ UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"`
+ // GIDMapping specifies group mappings for supporting user namespaces on Linux.
+ GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"`
+ // Sysctl are a set of key value pairs that are set for the container on start
+ Sysctl map[string]string `json:"sysctl,omitempty"`
+ // Resources contain cgroup information for handling resource constraints
+ // for the container
+ Resources *LinuxResources `json:"resources,omitempty"`
+ // CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
+ // The path is expected to be relative to the cgroups mountpoint.
+ // If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
+ CgroupsPath string `json:"cgroupsPath,omitempty"`
+ // Namespaces contains the namespaces that are created and/or joined by the container
+ Namespaces []LinuxNamespace `json:"namespaces,omitempty"`
+ // Devices are a list of device nodes that are created for the container
+ Devices []LinuxDevice `json:"devices,omitempty"`
+ // Seccomp specifies the seccomp security settings for the container.
+ Seccomp *LinuxSeccomp `json:"seccomp,omitempty"`
+ // RootfsPropagation is the rootfs mount propagation mode for the container.
+ RootfsPropagation string `json:"rootfsPropagation,omitempty"`
+ // MaskedPaths masks over the provided paths inside the container.
+ MaskedPaths []string `json:"maskedPaths,omitempty"`
+ // ReadonlyPaths sets the provided paths as RO inside the container.
+ ReadonlyPaths []string `json:"readonlyPaths,omitempty"`
+ // MountLabel specifies the selinux context for the mounts in the container.
+ MountLabel string `json:"mountLabel,omitempty"`
+ // IntelRdt contains Intel Resource Director Technology (RDT) information
+ // for handling resource constraints (e.g., L3 cache) for the container
+ IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
+}
+
+// LinuxNamespace is the configuration for a Linux namespace
+type LinuxNamespace struct {
+ // Type is the type of Linux namespace
+ Type LinuxNamespaceType `json:"type"`
+ // Path is a path to an existing namespace persisted on disk that can be joined
+ // and is of the same type
+ Path string `json:"path,omitempty"`
+}
+
+// LinuxNamespaceType is one of the Linux namespaces
+type LinuxNamespaceType string
+
+const (
+ // PIDNamespace for isolating process IDs
+ PIDNamespace LinuxNamespaceType = "pid"
+ // NetworkNamespace for isolating network devices, stacks, ports, etc
+ NetworkNamespace = "network"
+ // MountNamespace for isolating mount points
+ MountNamespace = "mount"
+ // IPCNamespace for isolating System V IPC, POSIX message queues
+ IPCNamespace = "ipc"
+ // UTSNamespace for isolating hostname and NIS domain name
+ UTSNamespace = "uts"
+ // UserNamespace for isolating user and group IDs
+ UserNamespace = "user"
+ // CgroupNamespace for isolating cgroup hierarchies
+ CgroupNamespace = "cgroup"
+)
+
+// LinuxIDMapping specifies UID/GID mappings
+type LinuxIDMapping struct {
+ // HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
+ HostID uint32 `json:"hostID"`
+ // ContainerID is the starting UID/GID in the container
+ ContainerID uint32 `json:"containerID"`
+ // Size is the number of IDs to be mapped
+ Size uint32 `json:"size"`
+}
+
+// LinuxRlimit type and restrictions
+type LinuxRlimit struct {
+ // Type of the rlimit to set
+ Type string `json:"type"`
+ // Hard is the hard limit for the specified type
+ Hard uint64 `json:"hard"`
+ // Soft is the soft limit for the specified type
+ Soft uint64 `json:"soft"`
+}
+
+// LinuxHugepageLimit structure corresponds to limiting kernel hugepages
+type LinuxHugepageLimit struct {
+ // Pagesize is the hugepage size
+ Pagesize string `json:"pageSize"`
+ // Limit is the limit of "hugepagesize" hugetlb usage
+ Limit uint64 `json:"limit"`
+}
+
+// LinuxInterfacePriority for network interfaces
+type LinuxInterfacePriority struct {
+ // Name is the name of the network interface
+ Name string `json:"name"`
+ // Priority for the interface
+ Priority uint32 `json:"priority"`
+}
+
+// linuxBlockIODevice holds major:minor format supported in blkio cgroup
+type linuxBlockIODevice struct {
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+}
+
+// LinuxWeightDevice struct holds a `major:minor weight` pair for blkioWeightDevice
+type LinuxWeightDevice struct {
+ linuxBlockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight *uint16 `json:"weight,omitempty"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
+ LeafWeight *uint16 `json:"leafWeight,omitempty"`
+}
+
+// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair
+type LinuxThrottleDevice struct {
+ linuxBlockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// LinuxBlockIO for Linux cgroup 'blkio' resource management
+type LinuxBlockIO struct {
+ // Specifies per cgroup weight, range is from 10 to 1000
+ Weight *uint16 `json:"blkioWeight,omitempty"`
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
+ LeafWeight *uint16 `json:"blkioLeafWeight,omitempty"`
+ // Weight per cgroup per device, can override BlkioWeight
+ WeightDevice []LinuxWeightDevice `json:"blkioWeightDevice,omitempty"`
+ // IO read rate limit per cgroup per device, bytes per second
+ ThrottleReadBpsDevice []LinuxThrottleDevice `json:"blkioThrottleReadBpsDevice,omitempty"`
+ // IO write rate limit per cgroup per device, bytes per second
+ ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"blkioThrottleWriteBpsDevice,omitempty"`
+ // IO read rate limit per cgroup per device, IO per second
+ ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"blkioThrottleReadIOPSDevice,omitempty"`
+ // IO write rate limit per cgroup per device, IO per second
+ ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"blkioThrottleWriteIOPSDevice,omitempty"`
+}
+
+// LinuxMemory for Linux cgroup 'memory' resource management
+type LinuxMemory struct {
+ // Memory limit (in bytes).
+ Limit *uint64 `json:"limit,omitempty"`
+ // Memory reservation or soft_limit (in bytes).
+ Reservation *uint64 `json:"reservation,omitempty"`
+ // Total memory limit (memory + swap).
+ Swap *uint64 `json:"swap,omitempty"`
+ // Kernel memory limit (in bytes).
+ Kernel *uint64 `json:"kernel,omitempty"`
+ // Kernel memory limit for tcp (in bytes)
+ KernelTCP *uint64 `json:"kernelTCP,omitempty"`
+ // How aggressive the kernel will swap memory pages. Range from 0 to 100.
+ Swappiness *uint64 `json:"swappiness,omitempty"`
+}
+
+// LinuxCPU for Linux cgroup 'cpu' resource management
+type LinuxCPU struct {
+ // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
+ Shares *uint64 `json:"shares,omitempty"`
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ Quota *int64 `json:"quota,omitempty"`
+ // CPU period to be used for hardcapping (in usecs).
+ Period *uint64 `json:"period,omitempty"`
+ // How much time realtime scheduling may use (in usecs).
+ RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"`
+ // CPU period to be used for realtime scheduling (in usecs).
+ RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"`
+ // CPUs to use within the cpuset. Default is to use any CPU available.
+ Cpus string `json:"cpus,omitempty"`
+ // List of memory nodes in the cpuset. Default is to use any available memory node.
+ Mems string `json:"mems,omitempty"`
+}
+
+// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
+type LinuxPids struct {
+ // Maximum number of PIDs. Default is "no limit".
+ Limit int64 `json:"limit"`
+}
+
+// LinuxNetwork identification and priority configuration
+type LinuxNetwork struct {
+ // Set class identifier for container's network packets
+ ClassID *uint32 `json:"classID,omitempty"`
+ // Set priority of network traffic for container
+ Priorities []LinuxInterfacePriority `json:"priorities,omitempty"`
+}
+
+// LinuxResources has container runtime resource constraints
+type LinuxResources struct {
+ // Devices configures the device whitelist.
+ Devices []LinuxDeviceCgroup `json:"devices,omitempty"`
+ // DisableOOMKiller disables the OOM killer for out of memory conditions
+ DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
+ // Specify an oom_score_adj for the container.
+ OOMScoreAdj *int `json:"oomScoreAdj,omitempty"`
+ // Memory restriction configuration
+ Memory *LinuxMemory `json:"memory,omitempty"`
+ // CPU resource restriction configuration
+ CPU *LinuxCPU `json:"cpu,omitempty"`
+ // Task resource restriction configuration.
+ Pids *LinuxPids `json:"pids,omitempty"`
+ // BlockIO restriction configuration
+ BlockIO *LinuxBlockIO `json:"blockIO,omitempty"`
+ // Hugetlb limit (in bytes)
+ HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"`
+ // Network restriction configuration
+ Network *LinuxNetwork `json:"network,omitempty"`
+}
+
+// LinuxDevice represents the mknod information for a Linux special device file
+type LinuxDevice struct {
+ // Path to the device.
+ Path string `json:"path"`
+ // Device type, block, char, etc.
+ Type string `json:"type"`
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+ // FileMode permission bits for the device.
+ FileMode *os.FileMode `json:"fileMode,omitempty"`
+ // UID of the device.
+ UID *uint32 `json:"uid,omitempty"`
+ // Gid of the device.
+ GID *uint32 `json:"gid,omitempty"`
+}
+
+// LinuxDeviceCgroup represents a device rule for the whitelist controller
+type LinuxDeviceCgroup struct {
+ // Allow or deny
+ Allow bool `json:"allow"`
+ // Device type, block, char, etc.
+ Type string `json:"type,omitempty"`
+ // Major is the device's major number.
+ Major *int64 `json:"major,omitempty"`
+ // Minor is the device's minor number.
+ Minor *int64 `json:"minor,omitempty"`
+ // Cgroup access permissions format, rwm.
+ Access string `json:"access,omitempty"`
+}
+
+// Solaris contains platform specific configuration for Solaris application containers.
+type Solaris struct {
+ // SMF FMRI which should go "online" before we start the container process.
+ Milestone string `json:"milestone,omitempty"`
+ // Maximum set of privileges any process in this container can obtain.
+ LimitPriv string `json:"limitpriv,omitempty"`
+ // The maximum amount of shared memory allowed for this container.
+ MaxShmMemory string `json:"maxShmMemory,omitempty"`
+ // Specification for automatic creation of network resources for this container.
+ Anet []SolarisAnet `json:"anet,omitempty"`
+ // Set limit on the amount of CPU time that can be used by container.
+ CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"`
+ // The physical and swap caps on the memory that can be used by this container.
+ CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"`
+}
+
+// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container.
+type SolarisCappedCPU struct {
+ Ncpus string `json:"ncpus,omitempty"`
+}
+
+// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container.
+type SolarisCappedMemory struct {
+ Physical string `json:"physical,omitempty"`
+ Swap string `json:"swap,omitempty"`
+}
+
+// SolarisAnet provides the specification for automatic creation of network resources for this container.
+type SolarisAnet struct {
+ // Specify a name for the automatically created VNIC datalink.
+ Linkname string `json:"linkname,omitempty"`
+ // Specify the link over which the VNIC will be created.
+ Lowerlink string `json:"lowerLink,omitempty"`
+ // The set of IP addresses that the container can use.
+ Allowedaddr string `json:"allowedAddress,omitempty"`
+ // Specifies whether allowedAddress limitation is to be applied to the VNIC.
+ Configallowedaddr string `json:"configureAllowedAddress,omitempty"`
+ // The value of the optional default router.
+ Defrouter string `json:"defrouter,omitempty"`
+ // Enable one or more types of link protection.
+ Linkprotection string `json:"linkProtection,omitempty"`
+ // Set the VNIC's macAddress
+ Macaddress string `json:"macAddress,omitempty"`
+}
+
+// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers.
+type Windows struct {
+ // Resources contains information for handling resource constraints for the container.
+ Resources *WindowsResources `json:"resources,omitempty"`
+}
+
+// WindowsResources has container runtime resource constraints for containers running on Windows.
+type WindowsResources struct {
+ // Memory restriction configuration.
+ Memory *WindowsMemoryResources `json:"memory,omitempty"`
+ // CPU resource restriction configuration.
+ CPU *WindowsCPUResources `json:"cpu,omitempty"`
+ // Storage restriction configuration.
+ Storage *WindowsStorageResources `json:"storage,omitempty"`
+ // Network restriction configuration.
+ Network *WindowsNetworkResources `json:"network,omitempty"`
+}
+
+// WindowsMemoryResources contains memory resource management settings.
+type WindowsMemoryResources struct {
+ // Memory limit in bytes.
+ Limit *uint64 `json:"limit,omitempty"`
+ // Memory reservation in bytes.
+ Reservation *uint64 `json:"reservation,omitempty"`
+}
+
+// WindowsCPUResources contains CPU resource management settings.
+type WindowsCPUResources struct {
+ // Number of CPUs available to the container.
+ Count *uint64 `json:"count,omitempty"`
+ // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000.
+ Shares *uint16 `json:"shares,omitempty"`
+ // Percent of available CPUs usable by the container.
+ Percent *uint8 `json:"percent,omitempty"`
+}
+
+// WindowsStorageResources contains storage resource management settings.
+type WindowsStorageResources struct {
+ // Specifies maximum Iops for the system drive.
+ Iops *uint64 `json:"iops,omitempty"`
+ // Specifies maximum bytes per second for the system drive.
+ Bps *uint64 `json:"bps,omitempty"`
+ // Sandbox size specifies the minimum size of the system drive in bytes.
+ SandboxSize *uint64 `json:"sandboxSize,omitempty"`
+}
+
+// WindowsNetworkResources contains network resource management settings.
+type WindowsNetworkResources struct {
+ // EgressBandwidth is the maximum egress bandwidth in bytes per second.
+ EgressBandwidth *uint64 `json:"egressBandwidth,omitempty"`
+}
+
+// LinuxSeccomp represents syscall restrictions
+type LinuxSeccomp struct {
+ DefaultAction LinuxSeccompAction `json:"defaultAction"`
+ Architectures []Arch `json:"architectures,omitempty"`
+ Syscalls []LinuxSyscall `json:"syscalls"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+ ArchPARISC Arch = "SCMP_ARCH_PARISC"
+ ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
+)
+
+// LinuxSeccompAction taken upon Seccomp rule match
+type LinuxSeccompAction string
+
+// Define actions for Seccomp rules
+const (
+ ActKill LinuxSeccompAction = "SCMP_ACT_KILL"
+ ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP"
+ ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
+ ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
+ ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+)
+
+// LinuxSeccompOperator used to match syscall arguments in Seccomp
+type LinuxSeccompOperator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE"
+ OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT"
+ OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE"
+ OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ"
+ OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE"
+ OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT"
+ OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ"
+)
+
+// LinuxSeccompArg used for matching specific syscall arguments in Seccomp
+type LinuxSeccompArg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op LinuxSeccompOperator `json:"op"`
+}
+
+// LinuxSyscall is used to match a syscall in Seccomp
+type LinuxSyscall struct {
+ Names []string `json:"names"`
+ Action LinuxSeccompAction `json:"action"`
+ Args []LinuxSeccompArg `json:"args"`
+}
+
+// LinuxIntelRdt has container runtime resource constraints
+// for Intel RDT/CAT which was introduced in the Linux 4.10 kernel
+type LinuxIntelRdt struct {
+ // The schema for L3 cache id and capacity bitmask (CBM)
+	// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+ L3CacheSchema string `json:"l3CacheSchema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
new file mode 100644
index 000000000..b5dd3bee8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -0,0 +1,17 @@
+package specs
+
+// State holds information about the runtime state of the container.
+type State struct {
+ // Version is the version of the specification that is supported.
+ Version string `json:"ociVersion"`
+ // ID is the container ID
+ ID string `json:"id"`
+ // Status is the runtime status of the container.
+ Status string `json:"status"`
+ // Pid is the process ID for the container process.
+ Pid int `json:"pid"`
+ // Bundle is the path to the container's bundle directory.
+ Bundle string `json:"bundle"`
+ // Annotations are key values associated with the container.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
new file mode 100644
index 000000000..dfcf0090e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -0,0 +1,18 @@
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+ VersionMinor = 0
+ // VersionPatch is for backwards-compatible bug fixes
+ VersionPatch = 0
+
+ // VersionDev indicates development branch. Releases will be empty string.
+ VersionDev = "-rc5-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 000000000..835ba3e75
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 000000000..273db3c98
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,52 @@
+# errors [Travis CI](https://travis-ci.org/pkg/errors) [AppVeyor](https://ci.appveyor.com/project/davecheney/errors/branch/master) [GoDoc](http://godoc.org/github.com/pkg/errors) [Go Report Card](https://goreportcard.com/report/github.com/pkg/errors)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
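+
+As a minimal end-to-end sketch (the `readConfig` helper and the file path below are made up for illustration), wrapping an error and later recovering its cause might look like:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/pkg/errors"
+)
+
+// readConfig wraps the underlying I/O error with context about which file failed.
+func readConfig(path string) ([]byte, error) {
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, errors.Wrap(err, "reading config "+path)
+	}
+	return b, nil
+}
+
+func main() {
+	if _, err := readConfig("/no/such/config.yml"); err != nil {
+		fmt.Printf("error: %v\n", err)               // wrapped message with context
+		fmt.Printf("cause: %v\n", errors.Cause(err)) // the original *os.PathError
+	}
+}
+```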
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## Licence
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 000000000..842ee8045
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,269 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// and the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// causer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// Where errors.StackTrace is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d", f)
+// }
+// }
+//
+// stackTracer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 000000000..6b1f2891a
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,178 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s path of source file relative to the compile time GOPATH
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(s, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+ }
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ fmt.Fprintf(s, "%d", f.line())
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(s, funcname(name))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ fmt.Fprintf(s, "\n%+v", f)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ fmt.Fprintf(s, "%v", []Frame(st))
+ }
+ case 's':
+ fmt.Fprintf(s, "%s", []Frame(st))
+ }
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+ // Here we want to get the source file path relative to the compile time
+ // GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+ // GOPATH at runtime, but we can infer the number of path segments in the
+ // GOPATH. We note that fn.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired output. We count separators from the end of the file
+ // path until it finds two more than in the function name and then move
+ // one character forward to preserve the initial path segment without a
+ // leading separator.
+ const sep = "/"
+ goal := strings.Count(name, sep) + 2
+ i := len(file)
+ for n := 0; n < goal; n++ {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ // not enough separators found, set i so that the slice expression
+ // below leaves file unmodified
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ file = file[i+len(sep):]
+ return file
+}
diff --git a/vendor/github.com/rneugeba/iso9660wrap/README.md b/vendor/github.com/rneugeba/iso9660wrap/README.md
index 80e541b32..918b62aac 100644
--- a/vendor/github.com/rneugeba/iso9660wrap/README.md
+++ b/vendor/github.com/rneugeba/iso9660wrap/README.md
@@ -1,3 +1,4 @@
iso9660wrap
===========
-This turns the [iso9660wrap](https://github.com/johto/iso9660wrap) utility into a package. It provides a simple means to create an ISO9660 file containing a single file.
+This turns the [iso9660wrap](https://github.com/johto/iso9660wrap) utility into a package. It provides a simple means to create an ISO9660 file containing a single file.
+
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
new file mode 100644
index 000000000..4c5ad88b1
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -0,0 +1,18 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "net"
+)
+
+type direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var Direct = direct{}
+
+func (direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
new file mode 100644
index 000000000..f540b196f
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "net"
+ "strings"
+)
+
+// A PerHost directs connections to a default Dialer unless the hostname
+// requested matches one of a number of exceptions.
+type PerHost struct {
+ def, bypass Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
+ return &PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *PerHost) dialerForRequest(host string) Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone "example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a hostname
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a hostname that will use the bypass proxy.
+func (p *PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 000000000..78a8b7bee
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,94 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "os"
+)
+
+// A Dialer is a means to establish a connection.
+type Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func FromEnvironment() Dialer {
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ return Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return Direct
+ }
+ proxy, err := FromURL(proxyURL, Direct)
+ if err != nil {
+ return Direct
+ }
+
+ noProxy := os.Getenv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := NewPerHost(proxy, Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+ if proxySchemes == nil {
+ proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+ }
+ proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+ var auth *Auth
+ if u.User != nil {
+ auth = new(Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxySchemes != nil {
+ if f, ok := proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
new file mode 100644
index 000000000..973f57f19
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -0,0 +1,213 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928.
+func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {
+ s := &socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type socks5 struct {
+ user, password string
+ network, addr string
+ forward Dialer
+}
+
+const socks5Version = 5
+
+const (
+ socks5AuthNone = 0
+ socks5AuthPassword = 2
+)
+
+const socks5Connect = 1
+
+const (
+ socks5IP4 = 1
+ socks5Domain = 3
+ socks5IP6 = 4
+)
+
+var socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the network net via the SOCKS5 proxy.
+func (s *socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ if buf[1] == socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination hostname too long: " + host)
+ }
+ buf = append(buf, socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(socks5Errors) {
+ failure = socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}